aboutsummaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/Makefile14
-rw-r--r--tools/arch/alpha/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/arc/include/uapi/asm/unistd.h51
-rw-r--r--tools/arch/arm64/include/asm/barrier.h10
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h43
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h2
-rw-r--r--tools/arch/csky/include/uapi/asm/perf_regs.h51
-rw-r--r--tools/arch/hexagon/include/uapi/asm/unistd.h40
-rw-r--r--tools/arch/mips/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h48
-rw-r--r--tools/arch/riscv/include/uapi/asm/bitsperlong.h13
-rw-r--r--tools/arch/riscv/include/uapi/asm/unistd.h42
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--tools/arch/x86/include/asm/barrier.h7
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h4
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/perf_regs.h26
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/arch/x86/lib/memcpy_64.S4
-rw-r--r--tools/arch/xtensa/include/uapi/asm/mman.h2
-rw-r--r--tools/bpf/Makefile.helpers1
-rw-r--r--tools/bpf/bpf_asm.c2
-rw-r--r--tools/bpf/bpf_dbg.c2
-rw-r--r--tools/bpf/bpf_jit_disasm.c2
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile1
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst222
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst19
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst86
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst6
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst6
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst27
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst10
-rw-r--r--tools/bpf/bpftool/Makefile1
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool190
-rw-r--r--tools/bpf/bpftool/btf.c586
-rw-r--r--tools/bpf/bpftool/btf_dumper.c157
-rw-r--r--tools/bpf/bpftool/cfg.c9
-rw-r--r--tools/bpf/bpftool/cgroup.c13
-rw-r--r--tools/bpf/bpftool/feature.c764
-rw-r--r--tools/bpf/bpftool/main.c4
-rw-r--r--tools/bpf/bpftool/main.h6
-rw-r--r--tools/bpf/bpftool/map.c277
-rw-r--r--tools/bpf/bpftool/net.c54
-rw-r--r--tools/bpf/bpftool/prog.c304
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c3
-rw-r--r--tools/build/Makefile.feature19
-rw-r--r--tools/build/Makefile.include1
-rw-r--r--tools/build/feature/Makefile6
-rw-r--r--tools/build/feature/test-all.c20
-rw-r--r--tools/build/feature/test-get_current_dir_name.c1
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/build/feature/test-libpython.c1
-rw-r--r--tools/build/feature/test-libzstd.c12
-rw-r--r--tools/build/feature/test-reallocarray.c2
-rw-r--r--tools/build/feature/test-sched_getcpu.c2
-rw-r--r--tools/build/feature/test-setns.c1
-rw-r--r--tools/debugging/Makefile16
-rwxr-xr-xtools/debugging/kernel-chktaint202
-rw-r--r--tools/firewire/nosy-dump.c15
-rw-r--r--tools/firmware/ihex2fw.c22
-rw-r--r--tools/gpio/gpio-event-mon.c5
-rw-r--r--tools/gpio/gpio-hammer.c5
-rw-r--r--tools/gpio/gpio-utils.c5
-rw-r--r--tools/gpio/gpio-utils.h4
-rw-r--r--tools/gpio/lsgpio.c5
-rw-r--r--tools/hv/hv_fcopy_daemon.c11
-rw-r--r--tools/hv/hv_vss_daemon.c13
-rw-r--r--tools/iio/iio_event_monitor.c19
-rw-r--r--tools/iio/iio_generic_buffer.c6
-rw-r--r--tools/iio/iio_utils.c5
-rw-r--r--tools/iio/iio_utils.h5
-rw-r--r--tools/iio/lsiio.c5
-rw-r--r--tools/include/asm-generic/barrier.h6
-rw-r--r--tools/include/linux/coresight-pmu.h2
-rw-r--r--tools/include/linux/filter.h41
-rw-r--r--tools/include/linux/log2.h6
-rw-r--r--tools/include/linux/numa.h16
-rw-r--r--tools/include/linux/poison.h3
-rw-r--r--tools/include/linux/rbtree.h66
-rw-r--r--tools/include/linux/rbtree_augmented.h74
-rw-r--r--tools/include/nolibc/nolibc.h (renamed from tools/testing/selftests/rcutorture/bin/nolibc.h)312
-rw-r--r--tools/include/uapi/asm-generic/mman-common-tools.h23
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h4
-rw-r--r--tools/include/uapi/asm-generic/mman.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h170
-rw-r--r--tools/include/uapi/drm/drm.h37
-rw-r--r--tools/include/uapi/drm/i915_drm.h240
-rw-r--r--tools/include/uapi/linux/bpf.h632
-rw-r--r--tools/include/uapi/linux/btf.h34
-rw-r--r--tools/include/uapi/linux/ethtool.h51
-rw-r--r--tools/include/uapi/linux/fcntl.h3
-rw-r--r--tools/include/uapi/linux/fs.h3
-rw-r--r--tools/include/uapi/linux/if_link.h1
-rw-r--r--tools/include/uapi/linux/if_xdp.h78
-rw-r--r--tools/include/uapi/linux/in.h9
-rw-r--r--tools/include/uapi/linux/kvm.h15
-rw-r--r--tools/include/uapi/linux/lirc.h12
-rw-r--r--tools/include/uapi/linux/mman.h4
-rw-r--r--tools/include/uapi/linux/mount.h62
-rw-r--r--tools/include/uapi/linux/perf_event.h55
-rw-r--r--tools/include/uapi/linux/prctl.h1
-rw-r--r--tools/include/uapi/linux/sched.h1
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h2
-rw-r--r--tools/include/uapi/sound/asound.h1
-rw-r--r--tools/io_uring/Makefile18
-rw-r--r--tools/io_uring/README29
-rw-r--r--tools/io_uring/barrier.h16
-rw-r--r--tools/io_uring/io_uring-bench.c592
-rw-r--r--tools/io_uring/io_uring-cp.c260
-rw-r--r--tools/io_uring/liburing.h183
-rw-r--r--tools/io_uring/queue.c156
-rw-r--r--tools/io_uring/setup.c107
-rw-r--r--tools/io_uring/syscall.c52
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat19
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt2
-rw-r--r--tools/laptop/freefall/freefall.c3
-rw-r--r--tools/lib/api/fd/array.c3
-rw-r--r--tools/lib/bitmap.c4
-rw-r--r--tools/lib/bpf/.gitignore2
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile91
-rw-r--r--tools/lib/bpf/README.rst16
-rw-r--r--tools/lib/bpf/bpf.c114
-rw-r--r--tools/lib/bpf/bpf.h12
-rw-r--r--tools/lib/bpf/btf.c2348
-rw-r--r--tools/lib/bpf/btf.h49
-rw-r--r--tools/lib/bpf/libbpf.c1223
-rw-r--r--tools/lib/bpf/libbpf.h114
-rw-r--r--tools/lib/bpf/libbpf.map40
-rw-r--r--tools/lib/bpf/libbpf.pc.template12
-rw-r--r--tools/lib/bpf/libbpf_internal.h40
-rw-r--r--tools/lib/bpf/libbpf_probes.c320
-rw-r--r--tools/lib/bpf/libbpf_util.h47
-rw-r--r--tools/lib/bpf/netlink.c85
-rw-r--r--tools/lib/bpf/test_libbpf.cpp4
-rw-r--r--tools/lib/bpf/xsk.c735
-rw-r--r--tools/lib/bpf/xsk.h213
-rw-r--r--tools/lib/find_bit.c6
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h2
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h11
-rwxr-xr-xtools/lib/lockdep/run_tests.sh6
-rw-r--r--tools/lib/lockdep/tests/ABBA.c9
-rw-r--r--tools/lib/rbtree.c192
-rw-r--r--tools/lib/traceevent/Documentation/Makefile207
-rw-r--r--tools/lib/traceevent/Documentation/asciidoc.conf120
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-commands.txt153
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-cpus.txt77
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_find.txt103
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_get.txt99
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_list.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_find.txt118
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_print.txt126
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_read.txt81
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-fields.txt105
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt91
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-filter.txt209
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt183
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_find.txt88
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-handle.txt101
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-header_page.txt102
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-long_size.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-page_size.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt90
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt137
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt156
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt155
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-strerror.txt85
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-tseq.txt158
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent.txt203
-rw-r--r--tools/lib/traceevent/Documentation/manpage-1.72.xsl14
-rw-r--r--tools/lib/traceevent/Documentation/manpage-base.xsl35
-rw-r--r--tools/lib/traceevent/Documentation/manpage-bold-literal.xsl17
-rw-r--r--tools/lib/traceevent/Documentation/manpage-normal.xsl13
-rw-r--r--tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl21
-rw-r--r--tools/lib/traceevent/Makefile46
-rw-r--r--tools/lib/traceevent/event-parse-api.c278
-rw-r--r--tools/lib/traceevent/event-parse-local.h6
-rw-r--r--tools/lib/traceevent/event-parse.c913
-rw-r--r--tools/lib/traceevent/event-parse.h154
-rw-r--r--tools/lib/traceevent/event-plugin.c32
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c49
-rw-r--r--tools/lib/traceevent/kbuffer.h13
-rw-r--r--tools/lib/traceevent/libtraceevent.pc.template4
-rw-r--r--tools/lib/traceevent/parse-filter.c216
-rw-r--r--tools/lib/traceevent/parse-utils.c2
-rw-r--r--tools/lib/traceevent/plugin_cfg80211.c8
-rw-r--r--tools/lib/traceevent/plugin_function.c14
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c12
-rw-r--r--tools/lib/traceevent/plugin_jbd2.c12
-rw-r--r--tools/lib/traceevent/plugin_kmem.c32
-rw-r--r--tools/lib/traceevent/plugin_kvm.c48
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c8
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c18
-rw-r--r--tools/lib/traceevent/plugin_scsi.c8
-rw-r--r--tools/lib/traceevent/plugin_xen.c8
-rw-r--r--tools/memory-model/.gitignore1
-rw-r--r--tools/memory-model/Documentation/explanation.txt289
-rw-r--r--tools/memory-model/README35
-rw-r--r--tools/memory-model/linux-kernel.bell38
-rw-r--r--tools/memory-model/linux-kernel.cat43
-rw-r--r--tools/memory-model/linux-kernel.def7
-rw-r--r--tools/memory-model/lock.cat3
-rw-r--r--tools/memory-model/scripts/README70
-rwxr-xr-xtools/memory-model/scripts/checkalllitmus.sh53
-rw-r--r--tools/memory-model/scripts/checkghlitmus.sh65
-rwxr-xr-xtools/memory-model/scripts/checklitmus.sh74
-rw-r--r--tools/memory-model/scripts/checklitmushist.sh60
-rw-r--r--tools/memory-model/scripts/cmplitmushist.sh87
-rw-r--r--tools/memory-model/scripts/initlitmushist.sh68
-rw-r--r--tools/memory-model/scripts/judgelitmus.sh78
-rw-r--r--tools/memory-model/scripts/newlitmushist.sh61
-rw-r--r--tools/memory-model/scripts/parseargs.sh136
-rw-r--r--tools/memory-model/scripts/runlitmushist.sh87
-rw-r--r--tools/objtool/Documentation/stack-validation.txt4
-rw-r--r--tools/objtool/Makefile10
-rw-r--r--tools/objtool/arch.h22
-rw-r--r--tools/objtool/arch/x86/decode.c35
-rw-r--r--tools/objtool/arch/x86/include/asm/inat.h16
-rw-r--r--tools/objtool/arch/x86/include/asm/inat_types.h16
-rw-r--r--tools/objtool/arch/x86/include/asm/insn.h15
-rw-r--r--tools/objtool/arch/x86/include/asm/orc_types.h14
-rw-r--r--tools/objtool/arch/x86/lib/inat.c16
-rw-r--r--tools/objtool/arch/x86/lib/insn.c15
-rw-r--r--tools/objtool/builtin-check.c18
-rw-r--r--tools/objtool/builtin-orc.c14
-rw-r--r--tools/objtool/builtin.h16
-rw-r--r--tools/objtool/cfi.h14
-rw-r--r--tools/objtool/check.c429
-rw-r--r--tools/objtool/check.h18
-rw-r--r--tools/objtool/elf.c29
-rw-r--r--tools/objtool/elf.h17
-rw-r--r--tools/objtool/objtool.c14
-rw-r--r--tools/objtool/orc.h14
-rw-r--r--tools/objtool/orc_dump.c14
-rw-r--r--tools/objtool/orc_gen.c14
-rw-r--r--tools/objtool/special.c32
-rw-r--r--tools/objtool/special.h15
-rw-r--r--tools/objtool/warn.h22
-rw-r--r--tools/pci/Makefile10
-rw-r--r--tools/pci/pcitest.c21
-rw-r--r--tools/pcmcia/crc32hash.c1
-rw-r--r--tools/perf/Build10
-rw-r--r--tools/perf/Documentation/Build.txt24
-rw-r--r--tools/perf/Documentation/Makefile1
-rw-r--r--tools/perf/Documentation/intel-pt.txt10
-rw-r--r--tools/perf/Documentation/perf-config.txt47
-rw-r--r--tools/perf/Documentation/perf-diff.txt56
-rw-r--r--tools/perf/Documentation/perf-list.txt12
-rw-r--r--tools/perf/Documentation/perf-record.txt50
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Documentation/perf-script.txt9
-rw-r--r--tools/perf/Documentation/perf-stat.txt9
-rw-r--r--tools/perf/Documentation/perf-trace.txt8
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt35
-rw-r--r--tools/perf/Documentation/perf.txt2
-rw-r--r--tools/perf/Documentation/tips.txt7
-rw-r--r--tools/perf/Makefile.config56
-rw-r--r--tools/perf/Makefile.perf36
-rw-r--r--tools/perf/arch/Build4
-rw-r--r--tools/perf/arch/arm/Build4
-rw-r--r--tools/perf/arch/arm/Makefile1
-rw-r--r--tools/perf/arch/arm/tests/Build8
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm/util/Build8
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c98
-rw-r--r--tools/perf/arch/arm/util/cs-etm.h3
-rw-r--r--tools/perf/arch/arm/util/dwarf-regs.c5
-rw-r--r--tools/perf/arch/arm/util/pmu.c3
-rw-r--r--tools/perf/arch/arm64/Build4
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c2
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl2
-rw-r--r--tools/perf/arch/arm64/tests/Build6
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/arm64/util/Build12
-rw-r--r--tools/perf/arch/arm64/util/dwarf-regs.c5
-rw-r--r--tools/perf/arch/arm64/util/sym-handling.c4
-rw-r--r--tools/perf/arch/csky/Build1
-rw-r--r--tools/perf/arch/csky/Makefile4
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h100
-rw-r--r--tools/perf/arch/csky/util/Build2
-rw-r--r--tools/perf/arch/csky/util/dwarf-regs.c49
-rw-r--r--tools/perf/arch/csky/util/unwind-libdw.c77
-rw-r--r--tools/perf/arch/nds32/Build2
-rw-r--r--tools/perf/arch/nds32/util/Build2
-rw-r--r--tools/perf/arch/powerpc/Build4
-rw-r--r--tools/perf/arch/powerpc/tests/Build6
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/powerpc/util/Build18
-rw-r--r--tools/perf/arch/powerpc/util/dwarf-regs.c6
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c2
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c9
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c4
-rw-r--r--tools/perf/arch/powerpc/util/unwind-libunwind.c6
-rw-r--r--tools/perf/arch/s390/Build2
-rw-r--r--tools/perf/arch/s390/Makefile1
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/arch/s390/util/Build12
-rw-r--r--tools/perf/arch/s390/util/header.c5
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c6
-rw-r--r--tools/perf/arch/s390/util/machine.c9
-rw-r--r--tools/perf/arch/sh/Build2
-rw-r--r--tools/perf/arch/sh/Makefile1
-rw-r--r--tools/perf/arch/sh/util/Build2
-rw-r--r--tools/perf/arch/sh/util/dwarf-regs.c16
-rw-r--r--tools/perf/arch/sparc/Build2
-rw-r--r--tools/perf/arch/sparc/Makefile1
-rw-r--r--tools/perf/arch/sparc/util/Build2
-rw-r--r--tools/perf/arch/sparc/util/dwarf-regs.c6
-rw-r--r--tools/perf/arch/x86/Build4
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl16
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h25
-rw-r--r--tools/perf/arch/x86/tests/Build14
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/arch/x86/tests/gen-insn-x86-dat.awk9
-rwxr-xr-xtools/perf/arch/x86/tests/gen-insn-x86-dat.sh9
-rw-r--r--tools/perf/arch/x86/util/Build31
-rw-r--r--tools/perf/arch/x86/util/archinsn.c26
-rw-r--r--tools/perf/arch/x86/util/auxtrace.c11
-rw-r--r--tools/perf/arch/x86/util/dwarf-regs.c16
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c11
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c11
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c1
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c44
-rw-r--r--tools/perf/arch/xtensa/Build2
-rw-r--r--tools/perf/arch/xtensa/Makefile1
-rw-r--r--tools/perf/arch/xtensa/util/Build2
-rw-r--r--tools/perf/arch/xtensa/util/dwarf-regs.c6
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/bench/numa.c11
-rw-r--r--tools/perf/builtin-annotate.c13
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-buildid-list.c8
-rw-r--r--tools/perf/builtin-c2c.c31
-rw-r--r--tools/perf/builtin-diff.c190
-rw-r--r--tools/perf/builtin-evlist.c4
-rw-r--r--tools/perf/builtin-ftrace.c3
-rw-r--r--tools/perf/builtin-inject.c16
-rw-r--r--tools/perf/builtin-kallsyms.c4
-rw-r--r--tools/perf/builtin-kmem.c9
-rw-r--r--tools/perf/builtin-kvm.c8
-rw-r--r--tools/perf/builtin-list.c14
-rw-r--r--tools/perf/builtin-lock.c8
-rw-r--r--tools/perf/builtin-mem.c9
-rw-r--r--tools/perf/builtin-probe.c17
-rw-r--r--tools/perf/builtin-record.c414
-rw-r--r--tools/perf/builtin-report.c141
-rw-r--r--tools/perf/builtin-sched.c63
-rw-r--r--tools/perf/builtin-script.c199
-rw-r--r--tools/perf/builtin-stat.c72
-rw-r--r--tools/perf/builtin-timechart.c14
-rw-r--r--tools/perf/builtin-top.c87
-rw-r--r--tools/perf/builtin-trace.c41
-rw-r--r--tools/perf/builtin-version.c2
-rw-r--r--tools/perf/builtin.h3
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/design.txt4
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c212
-rw-r--r--tools/perf/examples/bpf/augmented_syscalls.c22
-rw-r--r--tools/perf/examples/bpf/etcsnoop.c18
-rw-r--r--tools/perf/include/bpf/bpf.h22
-rw-r--r--tools/perf/perf-with-kcore.sh9
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/perf.h11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json179
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv5
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json2245
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json594
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json1982
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/branch.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/cache.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/core.json134
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json168
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/memory.json162
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/other.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json1630
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json1640
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json161
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json148
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json306
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json980
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json175
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json252
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json250
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json256
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json666
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json680
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1328
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json144
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json2157
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json1121
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json274
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json786
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json751
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json304
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rwxr-xr-xtools/perf/python/twatch.py9
-rw-r--r--tools/perf/scripts/Build4
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Context.c17
-rw-r--r--tools/perf/scripts/perl/rw-by-file.pl2
-rw-r--r--tools/perf/scripts/perl/rw-by-pid.pl2
-rw-r--r--tools/perf/scripts/perl/rwtop.pl2
-rw-r--r--tools/perf/scripts/perl/wakeup-latency.pl2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Build2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Context.c16
-rw-r--r--tools/perf/scripts/python/check-perf-trace.py76
-rw-r--r--tools/perf/scripts/python/compaction-times.py8
-rw-r--r--tools/perf/scripts/python/event_analyzing_sample.py48
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py77
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py40
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py1298
-rw-r--r--tools/perf/scripts/python/failed-syscalls-by-pid.py43
-rw-r--r--tools/perf/scripts/python/futex-contention.py10
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py60
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py25
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py12
-rw-r--r--tools/perf/scripts/python/netdev-times.py92
-rw-r--r--tools/perf/scripts/python/powerpc-hcalls.py18
-rw-r--r--tools/perf/scripts/python/sched-migration.py8
-rw-r--r--tools/perf/scripts/python/sctop.py27
-rwxr-xr-xtools/perf/scripts/python/stackcollapse.py9
-rw-r--r--tools/perf/scripts/python/stat-cpi.py11
-rw-r--r--tools/perf/scripts/python/syscall-counts-by-pid.py51
-rw-r--r--tools/perf/scripts/python/syscall-counts.py35
-rw-r--r--tools/perf/tests/attr.py1
-rw-r--r--tools/perf/tests/attr/test-record-C02
-rw-r--r--tools/perf/tests/attr/test-record-basic2
-rw-r--r--tools/perf/tests/attr/test-record-branch-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_ret2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-hv2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-ind_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-k2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-u2
-rw-r--r--tools/perf/tests/attr/test-record-count2
-rw-r--r--tools/perf/tests/attr/test-record-data2
-rw-r--r--tools/perf/tests/attr/test-record-freq2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-dwarf2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-group2
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling2
-rw-r--r--tools/perf/tests/attr/test-record-group12
-rw-r--r--tools/perf/tests/attr/test-record-no-buffering2
-rw-r--r--tools/perf/tests/attr/test-record-no-inherit2
-rw-r--r--tools/perf/tests/attr/test-record-no-samples2
-rw-r--r--tools/perf/tests/attr/test-record-period2
-rw-r--r--tools/perf/tests/attr/test-record-raw2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/bp_account.c1
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dso-data.c4
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/evsel-tp-sched.c7
-rw-r--r--tools/perf/tests/expr.c5
-rw-r--r--tools/perf/tests/hists_common.c9
-rw-r--r--tools/perf/tests/hists_cumulate.c15
-rw-r--r--tools/perf/tests/hists_filter.c1
-rw-r--r--tools/perf/tests/hists_link.c8
-rw-r--r--tools/perf/tests/hists_output.c33
-rw-r--r--tools/perf/tests/make2
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c1
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c4
-rw-r--r--tools/perf/tests/parse-events.c30
-rw-r--r--tools/perf/tests/pmu.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/sdt.c1
-rw-r--r--tools/perf/tests/shell/lib/probe.sh5
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh34
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh1
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c9
-rw-r--r--tools/perf/trace/beauty/Build26
-rw-r--r--tools/perf/trace/beauty/ioctl.c2
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh14
-rw-r--r--tools/perf/trace/beauty/msg_flags.c2
-rw-r--r--tools/perf/trace/beauty/renameat.c1
-rw-r--r--tools/perf/trace/beauty/waitid_options.c2
-rw-r--r--tools/perf/trace/strace/groups/string65
-rw-r--r--tools/perf/ui/Build18
-rw-r--r--tools/perf/ui/browser.c10
-rw-r--r--tools/perf/ui/browsers/Build11
-rw-r--r--tools/perf/ui/browsers/annotate.c3
-rw-r--r--tools/perf/ui/browsers/header.c2
-rw-r--r--tools/perf/ui/browsers/hists.c164
-rw-r--r--tools/perf/ui/browsers/map.c1
-rw-r--r--tools/perf/ui/browsers/res_sample.c91
-rw-r--r--tools/perf/ui/browsers/scripts.c274
-rw-r--r--tools/perf/ui/gtk/annotate.c3
-rw-r--r--tools/perf/ui/gtk/hists.c7
-rw-r--r--tools/perf/ui/hist.c1
-rw-r--r--tools/perf/ui/stdio/hist.c7
-rw-r--r--tools/perf/ui/tui/Build8
-rw-r--r--tools/perf/util/Build278
-rw-r--r--tools/perf/util/annotate.c248
-rw-r--r--tools/perf/util/annotate.h22
-rw-r--r--tools/perf/util/archinsn.h12
-rw-r--r--tools/perf/util/auxtrace.c44
-rw-r--r--tools/perf/util/auxtrace.h16
-rw-r--r--tools/perf/util/block-range.c2
-rw-r--r--tools/perf/util/block-range.h6
-rw-r--r--tools/perf/util/bpf-event.c480
-rw-r--r--tools/perf/util/bpf-event.h74
-rw-r--r--tools/perf/util/bpf-loader.c31
-rw-r--r--tools/perf/util/bpf-loader.h7
-rw-r--r--tools/perf/util/bpf_map.c72
-rw-r--r--tools/perf/util/bpf_map.h22
-rw-r--r--tools/perf/util/branch.h27
-rw-r--r--tools/perf/util/build-id.c15
-rw-r--r--tools/perf/util/build-id.h3
-rw-r--r--tools/perf/util/c++/Build4
-rw-r--r--tools/perf/util/c++/clang.cpp2
-rw-r--r--tools/perf/util/call-path.c11
-rw-r--r--tools/perf/util/call-path.h11
-rw-r--r--tools/perf/util/callchain.c17
-rw-r--r--tools/perf/util/callchain.h21
-rw-r--r--tools/perf/util/cloexec.c1
-rw-r--r--tools/perf/util/color.c39
-rw-r--r--tools/perf/util/color.h1
-rw-r--r--tools/perf/util/color_config.c47
-rw-r--r--tools/perf/util/comm.c1
-rw-r--r--tools/perf/util/comm.h4
-rw-r--r--tools/perf/util/compress.h53
-rw-r--r--tools/perf/util/config.c4
-rw-r--r--tools/perf/util/cpu-set-sched.h50
-rw-r--r--tools/perf/util/cpumap.c12
-rw-r--r--tools/perf/util/cpumap.h1
-rw-r--r--tools/perf/util/cputopo.c277
-rw-r--r--tools/perf/util/cputopo.h33
-rw-r--r--tools/perf/util/cs-etm-decoder/Build2
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c42
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h16
-rw-r--r--tools/perf/util/cs-etm.c846
-rw-r--r--tools/perf/util/cs-etm.h57
-rw-r--r--tools/perf/util/data-convert-bt.c13
-rw-r--r--tools/perf/util/data.c270
-rw-r--r--tools/perf/util/data.h28
-rw-r--r--tools/perf/util/db-export.c27
-rw-r--r--tools/perf/util/db-export.h14
-rw-r--r--tools/perf/util/drv_configs.c78
-rw-r--r--tools/perf/util/drv_configs.h26
-rw-r--r--tools/perf/util/dso.c54
-rw-r--r--tools/perf/util/dso.h25
-rw-r--r--tools/perf/util/dwarf-aux.c16
-rw-r--r--tools/perf/util/dwarf-aux.h16
-rw-r--r--tools/perf/util/env.c159
-rw-r--r--tools/perf/util/env.h35
-rw-r--r--tools/perf/util/event.c44
-rw-r--r--tools/perf/util/event.h68
-rw-r--r--tools/perf/util/evlist.c148
-rw-r--r--tools/perf/util/evlist.h21
-rw-r--r--tools/perf/util/evsel.c141
-rw-r--r--tools/perf/util/evsel.h19
-rw-r--r--tools/perf/util/genelf.c3
-rw-r--r--tools/perf/util/genelf_debug.c3
-rw-r--r--tools/perf/util/header.c642
-rw-r--r--tools/perf/util/header.h8
-rw-r--r--tools/perf/util/hist.c325
-rw-r--r--tools/perf/util/hist.h50
-rw-r--r--tools/perf/util/intel-bts.c37
-rw-r--r--tools/perf/util/intel-bts.h11
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/intel-pt-decoder/inat.c16
-rw-r--r--tools/perf/util/intel-pt-decoder/inat.h16
-rw-r--r--tools/perf/util/intel-pt-decoder/inat_types.h16
-rw-r--r--tools/perf/util/intel-pt-decoder/insn.c15
-rw-r--r--tools/perf/util/intel-pt-decoder/insn.h15
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c101
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c11
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h11
-rw-r--r--tools/perf/util/intel-pt.c39
-rw-r--r--tools/perf/util/intel-pt.h11
-rw-r--r--tools/perf/util/intlist.c3
-rw-r--r--tools/perf/util/intlist.h2
-rw-r--r--tools/perf/util/jitdump.c1
-rw-r--r--tools/perf/util/jitdump.h1
-rw-r--r--tools/perf/util/kvm-stat.h7
-rw-r--r--tools/perf/util/machine.c172
-rw-r--r--tools/perf/util/machine.h17
-rw-r--r--tools/perf/util/map.c52
-rw-r--r--tools/perf/util/map.h104
-rw-r--r--tools/perf/util/map_groups.h91
-rw-r--r--tools/perf/util/map_symbol.h22
-rw-r--r--tools/perf/util/metricgroup.c21
-rw-r--r--tools/perf/util/metricgroup.h3
-rw-r--r--tools/perf/util/mmap.c212
-rw-r--r--tools/perf/util/mmap.h18
-rw-r--r--tools/perf/util/namespaces.c4
-rw-r--r--tools/perf/util/namespaces.h4
-rw-r--r--tools/perf/util/ordered-events.c2
-rw-r--r--tools/perf/util/parse-events.c89
-rw-r--r--tools/perf/util/parse-events.h6
-rw-r--r--tools/perf/util/parse-events.l12
-rw-r--r--tools/perf/util/parse-events.y16
-rw-r--r--tools/perf/util/parse-regs-options.c33
-rw-r--r--tools/perf/util/parse-regs-options.h3
-rw-r--r--tools/perf/util/perf_regs.c10
-rw-r--r--tools/perf/util/perf_regs.h3
-rw-r--r--tools/perf/util/pmu.c38
-rw-r--r--tools/perf/util/pmu.h6
-rw-r--r--tools/perf/util/probe-event.c37
-rw-r--r--tools/perf/util/probe-event.h5
-rw-r--r--tools/perf/util/probe-file.c13
-rw-r--r--tools/perf/util/probe-finder.c16
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/rb_resort.h8
-rw-r--r--tools/perf/util/rblist.c31
-rw-r--r--tools/perf/util/rblist.h2
-rw-r--r--tools/perf/util/s390-cpumcf-kernel.h62
-rw-r--r--tools/perf/util/s390-cpumsf.c89
-rw-r--r--tools/perf/util/s390-sample-raw.c222
-rw-r--r--tools/perf/util/sample-raw.c18
-rw-r--r--tools/perf/util/sample-raw.h14
-rw-r--r--tools/perf/util/scripting-engines/Build4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c4
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c27
-rw-r--r--tools/perf/util/session.c298
-rw-r--r--tools/perf/util/session.h14
-rw-r--r--tools/perf/util/setup.py7
-rw-r--r--tools/perf/util/sort.c109
-rw-r--r--tools/perf/util/sort.h19
-rw-r--r--tools/perf/util/srccode.c10
-rw-r--r--tools/perf/util/srccode.h13
-rw-r--r--tools/perf/util/srcline.c45
-rw-r--r--tools/perf/util/srcline.h13
-rw-r--r--tools/perf/util/stat-display.c126
-rw-r--r--tools/perf/util/stat-shadow.c2
-rw-r--r--tools/perf/util/stat.c20
-rw-r--r--tools/perf/util/strlist.c3
-rw-r--r--tools/perf/util/strlist.h2
-rw-r--r--tools/perf/util/svghelper.c6
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol-minimal.c1
-rw-r--r--tools/perf/util/symbol.c95
-rw-r--r--tools/perf/util/symbol.h102
-rw-r--r--tools/perf/util/symbol_conf.h76
-rw-r--r--tools/perf/util/symbol_fprintf.c3
-rw-r--r--tools/perf/util/syscalltbl.c10
-rw-r--r--tools/perf/util/target.c3
-rw-r--r--tools/perf/util/thread-stack.c292
-rw-r--r--tools/perf/util/thread-stack.h20
-rw-r--r--tools/perf/util/thread.c42
-rw-r--r--tools/perf/util/thread.h11
-rw-r--r--tools/perf/util/time-utils.c59
-rw-r--r--tools/perf/util/time-utils.h7
-rw-r--r--tools/perf/util/tool.h7
-rw-r--r--tools/perf/util/top.c3
-rw-r--r--tools/perf/util/trace-event-info.c18
-rw-r--r--tools/perf/util/trace-event-parse.c20
-rw-r--r--tools/perf/util/trace-event-read.c20
-rw-r--r--tools/perf/util/trace-event-scripting.c16
-rw-r--r--tools/perf/util/trace-event.c4
-rw-r--r--tools/perf/util/unwind-libdw.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c7
-rw-r--r--tools/perf/util/unwind-libunwind.c11
-rw-r--r--tools/perf/util/util.c82
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--tools/perf/util/vdso.c1
-rw-r--r--tools/perf/util/zlib.c1
-rw-r--r--tools/perf/util/zstd.c111
-rw-r--r--tools/power/acpi/Makefile5
-rw-r--r--tools/power/acpi/Makefile.config5
-rw-r--r--tools/power/acpi/Makefile.rules5
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c50
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidbg/Makefile5
-rw-r--r--tools/power/acpi/tools/acpidbg/acpidbg.c5
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile5
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c10
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c10
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/acpi/tools/ec/Makefile5
-rw-r--r--tools/power/acpi/tools/ec/ec_access.c3
-rw-r--r--tools/power/cpupower/Makefile14
-rw-r--r--tools/power/cpupower/bench/benchmark.c15
-rw-r--r--tools/power/cpupower/bench/benchmark.h15
-rw-r--r--tools/power/cpupower/bench/config.h15
-rw-r--r--tools/power/cpupower/bench/cpufreq-bench_plot.sh15
-rw-r--r--tools/power/cpupower/bench/cpufreq-bench_script.sh15
-rw-r--r--tools/power/cpupower/bench/main.c15
-rw-r--r--tools/power/cpupower/bench/parse.c15
-rw-r--r--tools/power/cpupower/bench/parse.h15
-rw-r--r--tools/power/cpupower/bench/system.c15
-rw-r--r--tools/power/cpupower/bench/system.h15
-rw-r--r--tools/power/cpupower/debug/i386/centrino-decode.c3
-rw-r--r--tools/power/cpupower/debug/i386/intel_gsic.c3
-rw-r--r--tools/power/cpupower/debug/i386/powernow-k8-decode.c3
-rw-r--r--tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c1
-rw-r--r--tools/power/cpupower/lib/cpufreq.c22
-rw-r--r--tools/power/cpupower/lib/cpufreq.h26
-rw-r--r--tools/power/cpupower/lib/cpuidle.c3
-rw-r--r--tools/power/cpupower/lib/cpupower.c3
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c45
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c3
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c3
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c3
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c3
-rw-r--r--tools/power/cpupower/utils/cpupower.c3
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h3
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c3
-rw-r--r--tools/power/cpupower/utils/helpers/topology.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/idle_monitors.h4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c3
-rwxr-xr-xtools/power/pm-graph/bootgraph.py10
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py10
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py1
-rw-r--r--tools/power/x86/turbostat/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.c294
-rw-r--r--tools/power/x86/x86_energy_perf_policy/Makefile2
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c3
-rw-r--r--tools/spi/Makefile1
-rw-r--r--tools/spi/spidev_test.c5
-rwxr-xr-xtools/testing/ktest/config-bisect.pl3
-rwxr-xr-xtools/testing/ktest/ktest.pl124
-rw-r--r--tools/testing/ktest/sample.conf24
-rw-r--r--tools/testing/nvdimm/Kbuild10
-rw-r--r--tools/testing/nvdimm/dax-dev.c26
-rw-r--r--tools/testing/nvdimm/dax_pmem_compat_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_core_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_test.c8
-rw-r--r--tools/testing/nvdimm/pmem-dax.c10
-rw-r--r--tools/testing/nvdimm/test/iomap.c12
-rw-r--r--tools/testing/nvdimm/test/nfit.c30
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h10
-rw-r--r--tools/testing/nvdimm/watermark.h3
-rw-r--r--tools/testing/radix-tree/benchmark.c10
-rw-r--r--tools/testing/radix-tree/idr-test.c56
-rw-r--r--tools/testing/radix-tree/iteration_check.c10
-rw-r--r--tools/testing/radix-tree/multiorder.c10
-rw-r--r--tools/testing/scatterlist/Makefile1
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile102
-rw-r--r--tools/testing/selftests/android/Makefile1
-rw-r--r--tools/testing/selftests/android/ion/Makefile1
-rw-r--r--tools/testing/selftests/android/ion/ion.h11
-rw-r--r--tools/testing/selftests/android/ion/ionapp_export.c11
-rw-r--r--tools/testing/selftests/android/ion/ionapp_import.c11
-rw-r--r--tools/testing/selftests/bpf/.gitignore6
-rw-r--r--tools/testing/selftests/bpf/Makefile178
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h100
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h9
-rw-r--r--tools/testing/selftests/bpf/config11
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c45
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h67
-rw-r--r--tools/testing/selftests/bpf/map_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c629
-rw-r--r--tools/testing/selftests/bpf/prog_tests/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c270
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data.c157
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c75
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c24
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c80
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c165
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c164
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c103
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c135
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c82
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c (renamed from tools/testing/selftests/bpf/bpf_flow.c)98
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c (renamed from tools/testing/selftests/bpf/connect4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c (renamed from tools/testing/selftests/bpf/connect6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c (renamed from tools/testing/selftests/bpf/dev_cgroup.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/get_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c (renamed from tools/testing/selftests/bpf/netcnt_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_map_ret0.c (renamed from tools/testing/selftests/bpf/sample_map_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sample_ret0.c (renamed from tools/testing/selftests/bpf/sample_ret0.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c (renamed from tools/testing/selftests/bpf/sendmsg4_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c (renamed from tools/testing/selftests/bpf/sendmsg6_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c (renamed from tools/testing/selftests/bpf/socket_cookie_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c (renamed from tools/testing/selftests/bpf/sockmap_parse_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c (renamed from tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c (renamed from tools/testing/selftests/bpf/sockmap_verdict_prog.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_adjust_tail.c (renamed from tools/testing/selftests/bpf/test_adjust_tail.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c (renamed from tools/testing/selftests/bpf/test_btf_haskv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c (renamed from tools/testing/selftests/bpf/test_btf_nokv.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c (renamed from tools/testing/selftests/bpf/test_get_stack_rawtp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c106
-rw-r--r--tools/testing/selftests/bpf/progs/test_jhash.h70
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c (renamed from tools/testing/selftests/bpf/test_l4lb.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c (renamed from tools/testing/selftests/bpf/test_l4lb_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c (renamed from tools/testing/selftests/bpf/test_lirc_mode2_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c85
-rw-r--r--tools/testing/selftests/bpf/progs/test_lwt_seg6local.c (renamed from tools/testing/selftests/bpf/test_lwt_seg6local.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c (renamed from tools/testing/selftests/bpf/test_map_in_map.c)4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c66
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c (renamed from tools/testing/selftests/bpf/test_obj_id.c)5
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c (renamed from tools/testing/selftests/bpf/test_pkt_access.c)5
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c (renamed from tools/testing/selftests/bpf/test_pkt_md_access.c)5
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_map.c (renamed from tools/testing/selftests/bpf/test_queue_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c (renamed from tools/testing/selftests/bpf/test_select_reuseport_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c (renamed from tools/testing/selftests/bpf/test_sk_lookup_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c (renamed from tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c261
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockhash_kern.c (renamed from tools/testing/selftests/bpf/test_sockhash_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.c (renamed from tools/testing/selftests/bpf/test_sockmap_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c108
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_map.c (renamed from tools/testing/selftests/bpf/test_stack_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c (renamed from tools/testing/selftests/bpf/test_stacktrace_build_id.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c (renamed from tools/testing/selftests/bpf/test_stacktrace_map.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_edt.c109
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c536
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c129
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c (renamed from tools/testing/selftests/bpf/test_tcp_estats.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c (renamed from tools/testing/selftests/bpf/test_tcpbpf_kern.c)2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c (renamed from tools/testing/selftests/bpf/test_tcpnotify_kern.c)2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c (renamed from tools/testing/selftests/bpf/test_tracepoint.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c (renamed from tools/testing/selftests/bpf/test_tunnel_kern.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale2.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale3.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c (renamed from tools/testing/selftests/bpf/test_xdp.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c (renamed from tools/testing/selftests/bpf/test_xdp_meta.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c (renamed from tools/testing/selftests/bpf/test_xdp_noinline.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c (renamed from tools/testing/selftests/bpf/test_xdp_redirect.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c (renamed from tools/testing/selftests/bpf/test_xdp_vlan.c)0
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c (renamed from tools/testing/selftests/bpf/xdp_dummy.c)0
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py3
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py5
-rw-r--r--tools/testing/selftests/bpf/test_btf.c2147
-rw-r--r--tools/testing/selftests/bpf/test_btf.h69
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c5
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rw-r--r--tools/testing/selftests/bpf/test_iptunnel_common.h5
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c34
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c41
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c293
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh464
-rw-r--r--tools/testing/selftests/bpf/test_maps.c64
-rw-r--r--tools/testing/selftests/bpf/test_maps.h17
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py348
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1835
-rw-r--r--tools/testing/selftests/bpf/test_progs.h94
-rw-r--r--tools/testing/selftests/bpf/test_section_names.c15
-rw-r--r--tools/testing/selftests/bpf/test_sock.c9
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c213
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c489
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c4
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c1
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c1567
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_edt.sh99
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh290
-rwxr-xr-xtools/testing/selftests/bpf/test_tcp_check_syncookie.sh81
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c212
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c6
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c15759
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/urandom_read.c15
-rw-r--r--tools/testing/selftests/bpf/verifier/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c378
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_call.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_instr.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stx_ldx.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c508
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c2030
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c70
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c93
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c181
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c1033
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c159
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c655
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_value_access.c347
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c110
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c614
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/int_ptr.c160
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c107
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c746
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c167
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c375
-rw-r--r--tools/testing/selftests/bpf/verifier/junk_insn.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_abs.c286
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_dw.c45
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c154
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c59
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c74
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_tp_writable.c34
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c823
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c80
-rw-r--r--tools/testing/selftests/bpf/verifier/scale.c18
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c500
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/spin_lock.c333
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c317
-rw-r--r--tools/testing/selftests/bpf/verifier/subreg.c533
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c522
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c152
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c838
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c248
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c18
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c14
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c19
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c6
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c58
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h5
-rw-r--r--tools/testing/selftests/cgroup/test_core.c7
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c851
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c42
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/dma-buf/Makefile1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh200
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh311
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh98
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh122
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh93
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh460
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh126
-rw-r--r--tools/testing/selftests/efivarfs/Makefile1
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh28
-rw-r--r--tools/testing/selftests/exec/.gitignore3
-rw-r--r--tools/testing/selftests/exec/Makefile4
-rw-r--r--tools/testing/selftests/exec/execveat.c3
-rw-r--r--tools/testing/selftests/exec/recursion-depth.c67
-rw-r--r--tools/testing/selftests/firmware/Makefile1
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh9
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh2
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest24
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions12
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc85
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/selftest/bashisms.tc21
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc30
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc27
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc43
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc42
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c7
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c7
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh6
-rw-r--r--tools/testing/selftests/futex/include/atomic.h6
-rw-r--r--tools/testing/selftests/futex/include/futextest.h6
-rw-r--r--tools/testing/selftests/futex/include/logging.h6
-rwxr-xr-xtools/testing/selftests/futex/run.sh6
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c6
-rw-r--r--tools/testing/selftests/ia64/aliasing-test.c5
-rw-r--r--tools/testing/selftests/ima/config4
-rwxr-xr-xtools/testing/selftests/ima/test_kexec_load.sh54
-rw-r--r--tools/testing/selftests/ipc/msgque.c11
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c15
-rwxr-xr-xtools/testing/selftests/ir/ir_loopback.sh5
-rw-r--r--tools/testing/selftests/kcmp/Makefile1
-rw-r--r--tools/testing/selftests/kexec/Makefile (renamed from tools/testing/selftests/ima/Makefile)6
-rw-r--r--tools/testing/selftests/kexec/config3
-rwxr-xr-xtools/testing/selftests/kexec/kexec_common_lib.sh220
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_file_load.sh208
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_load.sh47
-rw-r--r--tools/testing/selftests/kmod/Makefile1
-rw-r--r--tools/testing/selftests/kselftest.h18
-rwxr-xr-xtools/testing/selftests/kselftest/prefix.pl23
-rw-r--r--tools/testing/selftests/kselftest/runner.sh76
-rw-r--r--tools/testing/selftests/kselftest_harness.h27
-rw-r--r--tools/testing/selftests/kselftest_module.h48
-rwxr-xr-xtools/testing/selftests/kselftest_module.sh84
-rw-r--r--tools/testing/selftests/kvm/.gitignore9
-rw-r--r--tools/testing/selftests/kvm/Makefile32
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c19
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h13
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h4
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h4
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h33
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c11
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c3
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c3
-rw-r--r--tools/testing/selftests/kvm/lib/io.c3
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c97
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h3
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c3
-rw-r--r--tools/testing/selftests/kvm/lib/ucall.c2
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c43
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c36
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c13
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c18
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c69
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmio_warning_test.c126
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_sregs_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c156
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c15
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c57
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c91
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c287
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c9
-rw-r--r--tools/testing/selftests/lib.mk96
-rw-r--r--tools/testing/selftests/lib/Makefile3
-rwxr-xr-xtools/testing/selftests/lib/bitmap.sh18
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh17
-rwxr-xr-xtools/testing/selftests/lib/printf.sh19
-rwxr-xr-xtools/testing/selftests/lib/strscpy.sh3
-rw-r--r--tools/testing/selftests/livepatch/Makefile9
-rw-r--r--tools/testing/selftests/livepatch/README43
-rw-r--r--tools/testing/selftests/livepatch/config1
-rw-r--r--tools/testing/selftests/livepatch/functions.sh198
-rwxr-xr-xtools/testing/selftests/livepatch/test-callbacks.sh587
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh168
-rwxr-xr-xtools/testing/selftests/livepatch/test-shadow-vars.sh60
-rwxr-xr-xtools/testing/selftests/media_tests/media_dev_allocator.sh85
-rw-r--r--tools/testing/selftests/membarrier/Makefile1
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c1
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c74
-rw-r--r--tools/testing/selftests/net/config5
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh20
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh164
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh152
-rw-r--r--tools/testing/selftests/net/forwarding/config2
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh112
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample3
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh63
-rwxr-xr-xtools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh63
-rw-r--r--tools/testing/selftests/net/forwarding/ipip_lib.sh349
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh9
-rwxr-xr-xtools/testing/selftests/net/forwarding/loopback.sh94
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh1
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_flower.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh88
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh5
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_broadcast.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh107
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh93
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_vlan_modify.sh164
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric.sh567
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric.sh551
-rw-r--r--tools/testing/selftests/net/ip_defrag.c69
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh16
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh217
-rw-r--r--tools/testing/selftests/net/psock_fanout.c16
-rw-r--r--tools/testing/selftests/net/psock_lib.h16
-rw-r--r--tools/testing/selftests/net/psock_tpacket.c16
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh151
-rwxr-xr-xtools/testing/selftests/net/run_afpackettests5
-rwxr-xr-xtools/testing/selftests/net/run_netsocktests2
-rw-r--r--tools/testing/selftests/net/tcp_inq.c12
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c16
-rw-r--r--tools/testing/selftests/net/tls.c198
-rw-r--r--tools/testing/selftests/net/udpgso.c1
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rwxr-xr-xtools/testing/selftests/netfilter/bridge_brouter.sh146
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_icmp_related.sh283
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh324
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh173
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c1
-rw-r--r--tools/testing/selftests/networking/timestamping/timestamping.c14
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c15
-rw-r--r--tools/testing/selftests/nsfs/Makefile1
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh11
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/pidfd/Makefile7
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c386
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c6
-rw-r--r--tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c7
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c6
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/futex_bench.c2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/gettimeofday.c2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/mmap_bench.c2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c8
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/cache_shape.c6
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/export.h1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/kasan.h0
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h1
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_user_test.c5
-rw-r--r--tools/testing/selftests/powerpc/harness.c8
-rw-r--r--tools/testing/selftests/powerpc/include/fpu_asm.h6
-rw-r--r--tools/testing/selftests/powerpc/include/gpr_asm.h6
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h12
-rw-r--r--tools/testing/selftests/powerpc/include/subunit.h2
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h4
-rw-r--r--tools/testing/selftests/powerpc/include/vmx_asm.h6
-rw-r--r--tools/testing/selftests/powerpc/include/vsx_asm.h6
-rw-r--r--tools/testing/selftests/powerpc/lib/reg.S6
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_asm.S6
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_preempt.c6
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_signal.c6
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_syscall.c6
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_asm.S6
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_preempt.c6
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_signal.c6
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_syscall.c6
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_asm.S6
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_preempt.c6
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c87
-rw-r--r--tools/testing/selftests/powerpc/mm/prot_sao.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/count_instructions.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/trace.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/trace.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/l3_bank_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/loop.S2
-rw-r--r--tools/testing/selftests/powerpc/pmu/per_event_excludes.c2
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.h6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace.h6
-rwxr-xr-xtools/testing/selftests/powerpc/scripts/hmi.sh9
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/signal/sigfuz.c325
-rw-r--r--tools/testing/selftests/powerpc/signal/signal.S6
-rw-r--r--tools/testing/selftests/powerpc/signal/signal.c6
-rw-r--r--tools/testing/selftests/powerpc/signal/signal_tm.c6
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h6
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c6
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-exec.c6
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-fork.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c7
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c184
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-stack.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal.S6
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tmspr.c3
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-trap.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-unavailable.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmxcopy.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h2
-rw-r--r--tools/testing/selftests/powerpc/utils.c2
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile1
l---------tools/testing/selftests/powerpc/vphn/vphn.c2
l---------tools/testing/selftests/powerpc/vphn/vphn.h2
-rw-r--r--tools/testing/selftests/proc/.gitignore1
-rw-r--r--tools/testing/selftests/proc/Makefile2
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c2
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c449
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c20
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c3
-rw-r--r--tools/testing/selftests/proc/proc-self-wchan.c2
-rw-r--r--tools/testing/selftests/proc/read.c14
-rwxr-xr-xtools/testing/selftests/pstore/common_tests2
-rwxr-xr-xtools/testing/selftests/pstore/pstore_crash_test2
-rwxr-xr-xtools/testing/selftests/pstore/pstore_post_reboot_tests2
-rwxr-xr-xtools/testing/selftests/pstore/pstore_tests2
-rw-r--r--tools/testing/selftests/ptp/testptp.c100
-rw-r--r--tools/testing/selftests/ptrace/Makefile1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configNR_CPUS.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh19
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh17
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh42
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h4
-rw-r--r--tools/testing/selftests/rseq/Makefile8
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h132
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h74
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h115
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h90
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h78
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h264
-rw-r--r--tools/testing/selftests/rseq/rseq.c55
-rw-r--r--tools/testing/selftests/rseq/rseq.h2
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh7
-rw-r--r--tools/testing/selftests/rtc/rtctest.c6
-rw-r--r--tools/testing/selftests/rtc/setdate.c11
-rw-r--r--tools/testing/selftests/safesetid/.gitignore1
-rw-r--r--tools/testing/selftests/safesetid/Makefile8
-rw-r--r--tools/testing/selftests/safesetid/config2
-rw-r--r--tools/testing/selftests/safesetid/safesetid-test.c334
-rwxr-xr-xtools/testing/selftests/safesetid/safesetid-test.sh26
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c126
-rw-r--r--tools/testing/selftests/sigaltstack/Makefile1
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c1
-rw-r--r--tools/testing/selftests/size/Makefile1
-rw-r--r--tools/testing/selftests/size/get_size.c3
-rw-r--r--tools/testing/selftests/static_keys/Makefile1
-rw-r--r--tools/testing/selftests/sync/sync_test.c1
-rw-r--r--tools/testing/selftests/sysctl/Makefile1
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh216
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/TdcPlugin.py4
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt5
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py16
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json954
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/sample.json49
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json177
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json40
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py15
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py58
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py1
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_multibatch.py65
-rw-r--r--tools/testing/selftests/timers/adjtick.c1
-rw-r--r--tools/testing/selftests/timers/freq-step.c10
-rw-r--r--tools/testing/selftests/timers/leapcrash.c1
-rw-r--r--tools/testing/selftests/timers/mqueue-lat.c1
-rw-r--r--tools/testing/selftests/timers/nanosleep.c1
-rw-r--r--tools/testing/selftests/timers/nsleep-lat.c1
-rw-r--r--tools/testing/selftests/timers/posix_timers.c3
-rw-r--r--tools/testing/selftests/timers/raw_skew.c1
-rw-r--r--tools/testing/selftests/timers/set-tai.c1
-rw-r--r--tools/testing/selftests/timers/set-tz.c2
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c1
-rw-r--r--tools/testing/selftests/timers/threadtest.c1
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c2
-rw-r--r--tools/testing/selftests/tmpfs/.gitignore1
-rw-r--r--tools/testing/selftests/tmpfs/Makefile8
-rw-r--r--tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c67
-rw-r--r--tools/testing/selftests/tpm2/Makefile4
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh4
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh4
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py697
-rw-r--r--tools/testing/selftests/tpm2/tpm2_tests.py290
-rw-r--r--tools/testing/selftests/user/Makefile1
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c2
-rw-r--r--tools/testing/selftests/vm/Makefile6
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c29
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests16
-rwxr-xr-xtools/testing/selftests/vm/test_vmalloc.sh176
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c6
-rw-r--r--tools/testing/selftests/vm/va_128TBswitch.c10
-rw-r--r--tools/testing/selftests/vm/virtual_address_range.c2
-rwxr-xr-xtools/testing/selftests/x86/check_cc.sh2
-rw-r--r--tools/testing/selftests/x86/check_initial_reg_state.c10
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c3
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c2
-rw-r--r--tools/testing/selftests/x86/mpx-dig.c2
-rw-r--r--tools/testing/selftests/x86/mpx-mini-test.c5
-rw-r--r--tools/testing/selftests/x86/sigreturn.c10
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c10
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c20
-rw-r--r--tools/testing/selftests/x86/syscall_nt.c10
-rw-r--r--tools/testing/selftests/x86/sysret_rip.c10
-rw-r--r--tools/testing/selftests/x86/sysret_ss_attrs.c10
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c10
-rw-r--r--tools/testing/selftests/x86/test_syscall_vdso.c10
-rw-r--r--tools/testing/selftests/x86/thunks.S10
-rw-r--r--tools/testing/selftests/x86/thunks_32.S10
-rw-r--r--tools/testing/selftests/x86/trivial_32bit_program.c2
-rw-r--r--tools/testing/selftests/x86/trivial_64bit_program.c2
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c10
-rw-r--r--tools/testing/selftests/x86/vdso_restorer.c10
-rwxr-xr-xtools/testing/selftests/zram/zram01.sh11
-rwxr-xr-xtools/testing/selftests/zram/zram02.sh11
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh11
-rw-r--r--tools/testing/vsock/Makefile1
-rw-r--r--tools/testing/vsock/control.c6
-rw-r--r--tools/testing/vsock/timeout.c6
-rw-r--r--tools/testing/vsock/vsock_diag_test.c6
-rw-r--r--tools/thermal/tmon/pid.c13
-rw-r--r--tools/thermal/tmon/sysfs.c11
-rw-r--r--tools/thermal/tmon/tmon.c11
-rw-r--r--tools/thermal/tmon/tmon.h11
-rw-r--r--tools/thermal/tmon/tui.c11
-rwxr-xr-xtools/time/udelay_test.sh9
-rw-r--r--tools/usb/ffs-aio-example/simple/host_app/Makefile1
-rw-r--r--tools/usb/ffs-test.c15
-rw-r--r--tools/usb/testusb.c15
-rw-r--r--tools/usb/usbip/libsrc/names.c20
-rw-r--r--tools/usb/usbip/libsrc/names.h17
-rw-r--r--tools/usb/usbip/libsrc/usbip_device_driver.c14
-rw-r--r--tools/usb/usbip/libsrc/usbip_device_driver.h14
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c14
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.h14
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_driver.c14
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_driver.h14
-rw-r--r--tools/usb/usbip/src/usbip.c14
-rw-r--r--tools/usb/usbip/src/usbip.h14
-rw-r--r--tools/usb/usbip/src/usbip_attach.c14
-rw-r--r--tools/usb/usbip/src/usbip_bind.c14
-rw-r--r--tools/usb/usbip/src/usbip_detach.c14
-rw-r--r--tools/usb/usbip/src/usbip_list.c14
-rw-r--r--tools/usb/usbip/src/usbip_network.c14
-rw-r--r--tools/usb/usbip/src/usbip_port.c11
-rw-r--r--tools/usb/usbip/src/usbip_unbind.c14
-rw-r--r--tools/usb/usbip/src/usbipd.c14
-rw-r--r--tools/usb/usbip/src/utils.c14
-rw-r--r--tools/usb/usbip/src/utils.h14
-rw-r--r--tools/virtio/linux/kernel.h2
-rw-r--r--tools/virtio/ringtest/main.c2
-rw-r--r--tools/virtio/ringtest/main.h2
-rw-r--r--tools/virtio/ringtest/ptr_ring.c1
-rw-r--r--tools/virtio/ringtest/ring.c2
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c2
-rw-r--r--tools/virtio/vhost_test/Makefile1
-rw-r--r--tools/virtio/virtio-trace/trace-agent-ctl.c4
-rw-r--r--tools/virtio/virtio-trace/trace-agent-rw.c4
-rw-r--r--tools/virtio/virtio-trace/trace-agent.c4
-rw-r--r--tools/vm/page-types.c16
-rw-r--r--tools/vm/slabinfo-gnuplot.sh9
-rw-r--r--tools/vm/slabinfo.c35
-rw-r--r--tools/wmi/Makefile1
-rw-r--r--tools/wmi/dell-smbios-example.c5
1521 files changed, 91596 insertions, 35330 deletions
diff --git a/tools/Makefile b/tools/Makefile
index 77f1aee8ea01..3dfd72ae6c1a 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ help:
@echo ' acpi - ACPI tools'
@echo ' cgroup - cgroup tools'
@echo ' cpupower - a tool for all things x86 CPU power'
+ @echo ' debugging - tools for debugging'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@echo ' firmware - Firmware tools'
@echo ' freefall - laptop accelerometer program for disk protection'
@@ -61,7 +62,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -96,7 +97,8 @@ kvm_stat: FORCE
all: acpi cgroup cpupower gpio hv firewire liblockdep \
perf selftests spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
- tmon freefall iio objtool kvm_stat wmi pci
+ tmon freefall iio objtool kvm_stat wmi \
+ pci debugging
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -104,7 +106,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -130,7 +132,7 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install pci_install
+ wmi_install pci_install debugging_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -138,7 +140,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -176,6 +178,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean
+ gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean
.PHONY: FORCE
diff --git a/tools/arch/alpha/include/uapi/asm/mman.h b/tools/arch/alpha/include/uapi/asm/mman.h
index c317d3e6867a..ea6a255ae61f 100644
--- a/tools/arch/alpha/include/uapi/asm/mman.h
+++ b/tools/arch/alpha/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x40000
#define MAP_NORESERVE 0x10000
#define MAP_POPULATE 0x20000
-#define MAP_PRIVATE 0x02
-#define MAP_SHARED 0x01
#define MAP_STACK 0x80000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/arc/include/uapi/asm/unistd.h b/tools/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..5eafa1115162
--- /dev/null
+++ b/tools/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h) */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
+__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
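The twice-includable guard above exists so that the generic syscall machinery can expand the __SYSCALL() entries a second time, for example to build a dispatch table. A self-contained toy of that X-macro pattern is sketched below; all names (SYSCALL_LIST, sys_hello, sys_bye) are illustrative and not kernel symbols.

    #include <stdio.h>

    /* Stand-in for the entries a header like unistd.h contributes per inclusion. */
    #define SYSCALL_LIST            \
            __SYSCALL(0, sys_hello) \
            __SYSCALL(1, sys_bye)

    static void sys_hello(void) { puts("hello"); }
    static void sys_bye(void)   { puts("bye"); }

    /* "Second inclusion": __SYSCALL now expands to table initializers. */
    #define __SYSCALL(nr, entry) [nr] = entry,
    static void (*const sys_call_table[])(void) = { SYSCALL_LIST };
    #undef __SYSCALL

    int main(void)
    {
            sys_call_table[1]();    /* dispatches to sys_bye() */
            return 0;
    }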
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 378c051fa177..3b9b41331c4f 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,6 +14,16 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
+/*
+ * The kernel uses dmb variants on arm64 for the smp_*() barriers: pretty much
+ * the same implementation as the mb()/wmb()/rmb() above, though for the latter
+ * the kernel uses dsb. In any case, should the mb()/wmb()/rmb() above change,
+ * make sure the smp_*() below don't.
+ */
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
+#define smp_wmb() asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb() asm volatile("dmb ishld" ::: "memory")
+
#define smp_store_release(p, v) \
do { \
union { typeof(*p) __val; char __c[1]; } __u = \
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 97c3478ee6e7..7b7ac0f6cec9 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -35,6 +35,7 @@
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
@@ -102,6 +103,9 @@ struct kvm_regs {
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
+#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
+#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
struct kvm_vcpu_init {
__u32 target;
@@ -226,6 +230,45 @@ struct kvm_vcpu_events {
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+/* SVE registers */
+#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+/* Z- and P-regs occupy blocks at the following offsets within this range: */
+#define KVM_REG_ARM64_SVE_ZREG_BASE 0
+#define KVM_REG_ARM64_SVE_PREG_BASE 0x400
+#define KVM_REG_ARM64_SVE_FFR_BASE 0x600
+
+#define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS
+#define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS
+
+#define KVM_ARM64_SVE_MAX_SLICES 32
+
+#define KVM_REG_ARM64_SVE_ZREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
+ KVM_REG_SIZE_U2048 | \
+ (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_PREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \
+ KVM_REG_SIZE_U256 | \
+ (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_FFR(i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \
+ KVM_REG_SIZE_U256 | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
+#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
+
+/* Vector lengths pseudo-register: */
+#define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
+ KVM_REG_SIZE_U512 | 0xffff)
+#define KVM_ARM64_SVE_VLS_WORDS \
+ ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
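As a hedged sketch of how a VMM might use the SVE register IDs added above: the encodings plug into the generic KVM one-reg interface. Assumptions here: the installed headers already carry these definitions, vcpu_fd is a vCPU created with SVE enabled and finalized, and a KVM_REG_SIZE_U2048 register transfers 256 bytes.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read slice 0 of SVE Z-register 3 into buf (2048 bits = 256 bytes). */
    static int get_sve_z3(int vcpu_fd, uint8_t buf[256])
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM64_SVE_ZREG(3, 0),
                    .addr = (uint64_t)(unsigned long)buf,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }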
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index dae1584cf017..4703d218663a 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -17,5 +17,7 @@
#define __ARCH_WANT_RENAMEAT
#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/tools/arch/csky/include/uapi/asm/perf_regs.h b/tools/arch/csky/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..ee323d818592
--- /dev/null
+++ b/tools/arch/csky/include/uapi/asm/perf_regs.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_PERF_REGS_H
+#define _ASM_CSKY_PERF_REGS_H
+
+/* Index of struct pt_regs */
+enum perf_event_csky_regs {
+ PERF_REG_CSKY_TLS,
+ PERF_REG_CSKY_LR,
+ PERF_REG_CSKY_PC,
+ PERF_REG_CSKY_SR,
+ PERF_REG_CSKY_SP,
+ PERF_REG_CSKY_ORIG_A0,
+ PERF_REG_CSKY_A0,
+ PERF_REG_CSKY_A1,
+ PERF_REG_CSKY_A2,
+ PERF_REG_CSKY_A3,
+ PERF_REG_CSKY_REGS0,
+ PERF_REG_CSKY_REGS1,
+ PERF_REG_CSKY_REGS2,
+ PERF_REG_CSKY_REGS3,
+ PERF_REG_CSKY_REGS4,
+ PERF_REG_CSKY_REGS5,
+ PERF_REG_CSKY_REGS6,
+ PERF_REG_CSKY_REGS7,
+ PERF_REG_CSKY_REGS8,
+ PERF_REG_CSKY_REGS9,
+#if defined(__CSKYABIV2__)
+ PERF_REG_CSKY_EXREGS0,
+ PERF_REG_CSKY_EXREGS1,
+ PERF_REG_CSKY_EXREGS2,
+ PERF_REG_CSKY_EXREGS3,
+ PERF_REG_CSKY_EXREGS4,
+ PERF_REG_CSKY_EXREGS5,
+ PERF_REG_CSKY_EXREGS6,
+ PERF_REG_CSKY_EXREGS7,
+ PERF_REG_CSKY_EXREGS8,
+ PERF_REG_CSKY_EXREGS9,
+ PERF_REG_CSKY_EXREGS10,
+ PERF_REG_CSKY_EXREGS11,
+ PERF_REG_CSKY_EXREGS12,
+ PERF_REG_CSKY_EXREGS13,
+ PERF_REG_CSKY_EXREGS14,
+ PERF_REG_CSKY_HI,
+ PERF_REG_CSKY_LO,
+ PERF_REG_CSKY_DCSR,
+#endif
+ PERF_REG_CSKY_MAX,
+};
+#endif /* _ASM_CSKY_PERF_REGS_H */
diff --git a/tools/arch/hexagon/include/uapi/asm/unistd.h b/tools/arch/hexagon/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..432c4db1b623
--- /dev/null
+++ b/tools/arch/hexagon/include/uapi/asm/unistd.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Syscall support for Hexagon
+ *
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+/*
+ * The kernel pulls this unistd.h in three different ways:
+ * 1. the "normal" way which gets all the __NR defines
+ * 2. with __SYSCALL defined to produce function declarations
+ * 3. with __SYSCALL defined to produce syscall table initialization
+ * See also: syscalltab.c
+ */
+
+#define sys_mmap2 sys_mmap_pgoff
+#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+#define __ARCH_WANT_TIME32_SYSCALLS
+
+#include <asm-generic/unistd.h>
diff --git a/tools/arch/mips/include/uapi/asm/mman.h b/tools/arch/mips/include/uapi/asm/mman.h
index de2206883abc..c8acaa138d46 100644
--- a/tools/arch/mips/include/uapi/asm/mman.h
+++ b/tools/arch/mips/include/uapi/asm/mman.h
@@ -28,8 +28,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x0400
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x002
-#define MAP_SHARED 0x001
#define MAP_STACK 0x40000
#define PROT_EXEC 0x04
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index 1bd78758bde9..f9fd1325f5bd 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x4000
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x02
-#define MAP_SHARED 0x01
#define MAP_STACK 0x40000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 8c876c166ef2..b0f72dea8b11 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
@@ -480,6 +482,8 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
+#define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)
+
/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC 1
#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
@@ -675,4 +679,48 @@ struct kvm_ppc_cpu_char {
#define KVM_XICS_PRESENTED (1ULL << 43)
#define KVM_XICS_QUEUED (1ULL << 44)
+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL 1
+#define KVM_DEV_XIVE_RESET 1
+#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT 3
+#define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT 32
+#define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT 33
+#define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT 0
+#define KVM_XIVE_EQ_PRIORITY_MASK 0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT 3
+#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET 0
+#define KVM_XIVE_ESB_PAGE_OFFSET 4
+
#endif /* __LINUX_KVM_POWERPC_H */
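A sketch of how the shift/mask pairs above compose a 64-bit XIVE source configuration value; the helper name and parameter types are illustrative, and the updated powerpc uapi headers are assumed to be installed.

    #include <linux/kvm.h>

    /* Pack (priority, server, masked, eisn) into the attribute layout above. */
    static inline __u64 xive_source_config(__u8 prio, __u32 server,
                                           int masked, __u32 eisn)
    {
            __u64 val = 0;

            val |= ((__u64)prio << KVM_XIVE_SOURCE_PRIORITY_SHIFT) &
                   KVM_XIVE_SOURCE_PRIORITY_MASK;
            val |= ((__u64)server << KVM_XIVE_SOURCE_SERVER_SHIFT) &
                   KVM_XIVE_SOURCE_SERVER_MASK;
            if (masked)
                    val |= KVM_XIVE_SOURCE_MASKED_MASK;
            val |= ((__u64)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
                   KVM_XIVE_SOURCE_EISN_MASK;

            return val;
    }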
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
index 0b3cb52fd29d..0b9b58b57ff6 100644
--- a/tools/arch/riscv/include/uapi/asm/bitsperlong.h
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
diff --git a/tools/arch/riscv/include/uapi/asm/unistd.h b/tools/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..0e2eeeb1fd27
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/unistd.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range; it's just
+ * there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
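A minimal userspace sketch of invoking the icache-flush syscall described above. The flag value 0 (make the flush visible to all threads) is taken from the comment, and the fallback number assumes __NR_arch_specific_syscall == 244 from asm-generic; both are assumptions, not guarantees.

    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_riscv_flush_icache
    #define __NR_riscv_flush_icache (244 + 15)   /* assumed asm-generic base */
    #endif

    /* Flush the instruction cache over [start, end); 0 = flush for all threads. */
    static long flush_icache(void *start, void *end)
    {
            return syscall(__NR_riscv_flush_icache, start, end, 0UL);
    }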
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 16511d97e8dc..47104e5b47fd 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -152,7 +152,10 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
__u8 kma[16]; /* with MSA8 */
- __u8 reserved[1808];
+ __u8 kdsa[16]; /* with MSA9 */
+ __u8 sortl[32]; /* with STFLE.150 */
+ __u8 dfltcc[32]; /* with STFLE.151 */
+ __u8 reserved[1728];
};
/* kvm attributes for crypto */
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
index 58919868473c..0adf295dd5b6 100644
--- a/tools/arch/x86/include/asm/barrier.h
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -21,9 +21,12 @@
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
+#define mb() asm volatile("mfence" ::: "memory")
+#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#endif
#if defined(__x86_64__)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 6d6122524711..75f27ee2c263 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,8 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -381,5 +383,7 @@
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index dabfcf7c3941..24a8cd229df6 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -381,6 +381,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
@@ -391,7 +392,7 @@ struct kvm_sync_regs {
struct kvm_vmx_nested_state {
__u64 vmxon_pa;
- __u64 vmcs_pa;
+ __u64 vmcs12_pa;
struct {
__u16 flags;
diff --git a/tools/arch/x86/include/uapi/asm/perf_regs.h b/tools/arch/x86/include/uapi/asm/perf_regs.h
index f3329cabce5c..7c9d2bb3833b 100644
--- a/tools/arch/x86/include/uapi/asm/perf_regs.h
+++ b/tools/arch/x86/include/uapi/asm/perf_regs.h
@@ -27,8 +27,32 @@ enum perf_event_x86_regs {
PERF_REG_X86_R13,
PERF_REG_X86_R14,
PERF_REG_X86_R15,
-
+ /* These are the limits for the GPRs. */
PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+
+ /* These all need two bits set because they are 128bit */
+ PERF_REG_X86_XMM0 = 32,
+ PERF_REG_X86_XMM1 = 34,
+ PERF_REG_X86_XMM2 = 36,
+ PERF_REG_X86_XMM3 = 38,
+ PERF_REG_X86_XMM4 = 40,
+ PERF_REG_X86_XMM5 = 42,
+ PERF_REG_X86_XMM6 = 44,
+ PERF_REG_X86_XMM7 = 46,
+ PERF_REG_X86_XMM8 = 48,
+ PERF_REG_X86_XMM9 = 50,
+ PERF_REG_X86_XMM10 = 52,
+ PERF_REG_X86_XMM11 = 54,
+ PERF_REG_X86_XMM12 = 56,
+ PERF_REG_X86_XMM13 = 58,
+ PERF_REG_X86_XMM14 = 60,
+ PERF_REG_X86_XMM15 = 62,
+
+ /* These include both GPRs and XMM registers */
+ PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
};
+
+#define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1))
+
#endif /* _ASM_X86_PERF_REGS_H */
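A sketch of requesting the new XMM sample registers: since each XMM register is 128 bits and spans two index slots, two mask bits are set per register (following the comment above); the perf_event_attr wiring is illustrative and assumes headers that include these definitions.

    #include <linux/perf_event.h>
    #include <asm/perf_regs.h>

    static void sample_gprs_and_xmm0(struct perf_event_attr *attr)
    {
            __u64 gprs = (1ULL << PERF_REG_X86_64_MAX) - 1;   /* every index below X86_64_MAX */
            __u64 xmm0 = 0x3ULL << PERF_REG_X86_XMM0;         /* both bits of XMM0 */

            attr->sample_type      |= PERF_SAMPLE_REGS_USER;
            attr->sample_regs_user |= gprs | xmm0;
    }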
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index f0b0c90dd398..d213ec5c3766 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -146,6 +146,7 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..92748660ba51 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */
#include <linux/linkage.h>
@@ -257,6 +258,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
xorl %eax, %eax
+.L_done:
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +275,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
addl %edx, %ecx
.E_trailing_bytes:
mov %ecx, %eax
- ret
+ jmp .L_done
/*
* For write fault handling, given the destination is unaligned,
diff --git a/tools/arch/xtensa/include/uapi/asm/mman.h b/tools/arch/xtensa/include/uapi/asm/mman.h
index 34dde6f44dae..f2b08c990afc 100644
--- a/tools/arch/xtensa/include/uapi/asm/mman.h
+++ b/tools/arch/xtensa/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
#define MAP_NONBLOCK 0x20000
#define MAP_NORESERVE 0x0400
#define MAP_POPULATE 0x10000
-#define MAP_PRIVATE 0x002
-#define MAP_SHARED 0x001
#define MAP_STACK 0x40000
#define PROT_EXEC 0x4
#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
index c34fea77f39f..854d084026dd 100644
--- a/tools/bpf/Makefile.helpers
+++ b/tools/bpf/Makefile.helpers
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef allow-override
include ../scripts/Makefile.include
include ../scripts/utilities.mak
diff --git a/tools/bpf/bpf_asm.c b/tools/bpf/bpf_asm.c
index c15aef097b04..e5f95e3eede3 100644
--- a/tools/bpf/bpf_asm.c
+++ b/tools/bpf/bpf_asm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF assembler
*
@@ -16,7 +17,6 @@
* pretty print a C-like construct.
*
* Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
#include <stdbool.h>
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
index 61b9aa5d6415..9d3766e653a9 100644
--- a/tools/bpf/bpf_dbg.c
+++ b/tools/bpf/bpf_dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF debugger
*
@@ -28,7 +29,6 @@
* 7) > step [-<n>, +<n>] (performs single stepping through the BPF)
*
* Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
#include <stdio.h>
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 58c2bab4ef6e..c8ae95804728 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Minimal BPF JIT image disassembler
*
@@ -11,7 +12,6 @@
* 3) Run e.g. `bpf_jit_disasm -o` to read out the last JIT code
*
* Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
- * Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
#include <stdint.h>
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 67167e44b726..8248b8dd89d4 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,5 +1,5 @@
*.d
-bpftool
+/bpftool
bpftool*.8
bpf-helpers.*
FEATURE-DUMP.bpftool
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index f7663a3e60c9..815ac9804aee 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
include ../../../scripts/Makefile.include
include ../../../scripts/utilities.mak
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
new file mode 100644
index 000000000000..2dbc1413fabd
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -0,0 +1,222 @@
+================
+bpftool-btf
+================
+-------------------------------------------------------------------------------
+tool for inspection of BTF data
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **btf** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* := { **dump** | **help** }
+
+BTF COMMANDS
+=============
+
+| **bpftool** **btf dump** *BTF_SRC*
+| **bpftool** **btf help**
+|
+| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+
+DESCRIPTION
+===========
+ **bpftool btf dump** *BTF_SRC*
+ Dump BTF entries from a given *BTF_SRC*.
+
+ When **id** is specified, the BTF object with that ID will
+ be loaded and all its BTF types emitted.
+
+ When **map** is provided, the map is expected to have an
+ associated BTF object with BTF types describing its key and
+ value. It's possible to select whether to dump only the BTF
+ type(s) associated with the key (**key**), the value
+ (**value**), both key and value (**kv**), or all BTF types
+ present in the associated BTF object (**all**). If not
+ specified, **kv** is assumed.
+
+ When **prog** is provided, the program is expected to have
+ an associated BTF object with BTF types.
+
+ When specifying *FILE*, an ELF file is expected, containing
+ a .BTF section with well-defined BTF binary format data,
+ typically produced by clang or pahole.
+
+ **bpftool btf help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -V, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+EXAMPLES
+========
+**# bpftool btf dump id 1226**
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] STRUCT 'dummy_tracepoint_args' size=16 vlen=2
+ 'pad' type_id=3 bits_offset=0
+ 'sock' type_id=4 bits_offset=64
+ [3] INT 'long long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [4] PTR '(anon)' type_id=5
+ [5] FWD 'sock' fwd_kind=union
+
+This gives an example of default output for all supported BTF kinds.
+
+**$ cat prog.c**
+::
+
+ struct fwd_struct;
+
+ enum my_enum {
+ VAL1 = 3,
+ VAL2 = 7,
+ };
+
+ typedef struct my_struct my_struct_t;
+
+ struct my_struct {
+ const unsigned int const_int_field;
+ int bitfield_field: 4;
+ char arr_field[16];
+ const struct fwd_struct *restrict fwd_field;
+ enum my_enum enum_field;
+ volatile my_struct_t *typedef_ptr_field;
+ };
+
+ union my_union {
+ int a;
+ struct my_struct b;
+ };
+
+ struct my_struct struct_global_var __attribute__((section("data_sec"))) = {
+ .bitfield_field = 3,
+ .enum_field = VAL1,
+ };
+ int global_var __attribute__((section("data_sec"))) = 7;
+
+ __attribute__((noinline))
+ int my_func(union my_union *arg1, int arg2)
+ {
+ static int static_var __attribute__((section("data_sec"))) = 123;
+ static_var++;
+ return static_var;
+ }
+
+**$ bpftool btf dump file prog.o**
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] UNION 'my_union' size=48 vlen=2
+ 'a' type_id=3 bits_offset=0
+ 'b' type_id=4 bits_offset=0
+ [3] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ [4] STRUCT 'my_struct' size=48 vlen=6
+ 'const_int_field' type_id=5 bits_offset=0
+ 'bitfield_field' type_id=3 bits_offset=32 bitfield_size=4
+ 'arr_field' type_id=8 bits_offset=40
+ 'fwd_field' type_id=10 bits_offset=192
+ 'enum_field' type_id=14 bits_offset=256
+ 'typedef_ptr_field' type_id=15 bits_offset=320
+ [5] CONST '(anon)' type_id=6
+ [6] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ [7] INT 'char' size=1 bits_offset=0 nr_bits=8 encoding=SIGNED
+ [8] ARRAY '(anon)' type_id=7 index_type_id=9 nr_elems=16
+ [9] INT '__ARRAY_SIZE_TYPE__' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ [10] RESTRICT '(anon)' type_id=11
+ [11] PTR '(anon)' type_id=12
+ [12] CONST '(anon)' type_id=13
+ [13] FWD 'fwd_struct' fwd_kind=union
+ [14] ENUM 'my_enum' size=4 vlen=2
+ 'VAL1' val=3
+ 'VAL2' val=7
+ [15] PTR '(anon)' type_id=16
+ [16] VOLATILE '(anon)' type_id=17
+ [17] TYPEDEF 'my_struct_t' type_id=4
+ [18] FUNC_PROTO '(anon)' ret_type_id=3 vlen=2
+ 'arg1' type_id=1
+ 'arg2' type_id=3
+ [19] FUNC 'my_func' type_id=18
+ [20] VAR 'struct_global_var' type_id=4, linkage=global-alloc
+ [21] VAR 'global_var' type_id=3, linkage=global-alloc
+ [22] VAR 'my_func.static_var' type_id=3, linkage=static
+ [23] DATASEC 'data_sec' size=0 vlen=3
+ type_id=20 offset=0 size=48
+ type_id=21 offset=0 size=4
+ type_id=22 offset=52 size=4
+
+The following commands print BTF types associated with the specified map's
+key, value, both key and value, and all BTF types, respectively. By default,
+both key and value types will be printed.
+
+**# bpftool btf dump map id 123 key**
+
+::
+
+ [39] TYPEDEF 'u32' type_id=37
+
+**# bpftool btf dump map id 123 value**
+
+::
+
+ [86] PTR '(anon)' type_id=87
+
+**# bpftool btf dump map id 123 kv**
+
+::
+
+ [39] TYPEDEF 'u32' type_id=37
+ [86] PTR '(anon)' type_id=87
+
+**# bpftool btf dump map id 123 all**
+
+::
+
+ [1] PTR '(anon)' type_id=0
+ .
+ .
+ .
+ [2866] ARRAY '(anon)' type_id=52 index_type_id=51 nr_elems=4
+
+All the standard ways to specify map or program are supported:
+
+**# bpftool btf dump map id 123**
+
+**# bpftool btf dump map pinned /sys/fs/bpf/map_name**
+
+**# bpftool btf dump prog id 456**
+
+**# bpftool btf dump prog tag b88e0a09b1d9759d**
+
+**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index d07ccf8a23f7..e744b3e4e56a 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -17,8 +17,8 @@ SYNOPSIS
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
-MAP COMMANDS
-=============
+CGROUP COMMANDS
+===============
| **bpftool** **cgroup { show | list }** *CGROUP*
| **bpftool** **cgroup tree** [*CGROUP_ROOT*]
@@ -29,7 +29,7 @@ MAP COMMANDS
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
-| **sendmsg4** | **sendmsg6** }
+| **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@@ -85,7 +85,12 @@ DESCRIPTION
**sendmsg4** call to sendto(2), sendmsg(2), sendmmsg(2) for an
unconnected udp4 socket (since 4.18);
**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
- unconnected udp6 socket (since 4.18).
+ unconnected udp6 socket (since 4.18);
+ **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+ an unconnected udp4 socket (since 5.2);
+ **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+ an unconnected udp6 socket (since 5.2);
+ **sysctl** sysctl access (since 5.2).
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
Detach *PROG* from the cgroup *CGROUP* and attach type
@@ -99,7 +104,7 @@ OPTIONS
-h, --help
Print short generic help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -142,5 +147,7 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
- **bpftool-perf**\ (8)
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
new file mode 100644
index 000000000000..14180e887082
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -0,0 +1,86 @@
+===============
+bpftool-feature
+===============
+-------------------------------------------------------------------------------
+tool for inspection of eBPF-related parameters for Linux kernel or net device
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **feature** *COMMAND*
+
+ *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+
+ *COMMANDS* := { **probe** | **help** }
+
+FEATURE COMMANDS
+================
+
+| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature help**
+|
+| *COMPONENT* := { **kernel** | **dev** *NAME* }
+
+DESCRIPTION
+===========
+ **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]]
+ Probe the running kernel and dump a number of eBPF-related
+ parameters, such as availability of the **bpf()** system call,
+ JIT status, eBPF program types availability, eBPF helper
+ functions availability, and more.
+
+ If the **macros** keyword (but not the **-j** option) is
+ passed, a subset of the output is dumped as a list of
+ **#define** macros that are ready to be included in a C
+ header file, for example. If, additionally, **prefix** is
+ used to define a *PREFIX*, the provided string will be used
+ as a prefix to the names of the macros: this can be used to
+ avoid conflicts on macro names when including the output of
+ this command as a header file.
+
+ Keyword **kernel** can be omitted. If no probe target is
+ specified, probing the kernel is the default behaviour.
+
+ Note that when probed, some eBPF helpers (e.g.
+ **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may
+ print warnings to kernel logs.
+
+ **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]]
+ Probe network device for supported eBPF features and dump
+ results to the console.
+
+ The two keywords **macros** and **prefix** have the same
+ role as when probing the kernel.
+
+ **bpftool feature help**
+ Print short help message.
+
+OPTIONS
+=======
+ -h, --help
+ Print short generic help message (similar to **bpftool help**).
+
+ -V, --version
+ Print version number (similar to **bpftool version**).
+
+ -j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+ -p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+SEE ALSO
+========
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
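
For readers wondering what the **macros** output looks like: judging from print_bool_feature() in feature.c later in this patch, each boolean probe becomes a #define of the form <prefix>HAVE_<NAME>, or <prefix>NO_HAVE_<NAME> when the feature is absent. The names below are purely illustrative, with a hypothetical FOO_ prefix; the real macro names depend on the probes compiled into bpftool and on the kernel being probed::

    /* Hypothetical output of "bpftool feature probe kernel macros prefix FOO_" */
    #define FOO_HAVE_BPF_SYSCALL
    #define FOO_NO_HAVE_SOME_OPTIONAL_FEATURE
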
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 64b001b4f777..13ef27b39f20 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -25,12 +25,17 @@ MAP COMMANDS
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
-| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
-| **bpftool** **map lookup** *MAP* **key** *DATA*
+| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+| **bpftool** **map lookup** *MAP* [**key** *DATA*]
| **bpftool** **map getnext** *MAP* [**key** *DATA*]
| **bpftool** **map delete** *MAP* **key** *DATA*
| **bpftool** **map pin** *MAP* *FILE*
| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+| **bpftool** **map peek** *MAP*
+| **bpftool** **map push** *MAP* **value** *VALUE*
+| **bpftool** **map pop** *MAP*
+| **bpftool** **map enqueue** *MAP* **value** *VALUE*
+| **bpftool** **map dequeue** *MAP*
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -62,7 +67,7 @@ DESCRIPTION
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*.
- **bpftool map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*]
+ **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
Update map entry for a given *KEY*.
*UPDATE_FLAGS* can be one of: **any** update existing entry
@@ -75,7 +80,7 @@ DESCRIPTION
the bytes are parsed as decimal values, unless a "0x" prefix
(for hexadecimal) or a "0" prefix (for octal) is provided.
- **bpftool map lookup** *MAP* **key** *DATA*
+ **bpftool map lookup** *MAP* [**key** *DATA*]
Lookup **key** in the map.
**bpftool map getnext** *MAP* [**key** *DATA*]
@@ -107,6 +112,21 @@ DESCRIPTION
replace any existing ring. Any other application will stop
receiving events if it installed its rings earlier.
+ **bpftool map peek** *MAP*
+ Peek next **value** in the queue or stack.
+
+ **bpftool map push** *MAP* **value** *VALUE*
+ Push **value** onto the stack.
+
+ **bpftool map pop** *MAP*
+ Pop and print **value** from the stack.
+
+ **bpftool map enqueue** *MAP* **value** *VALUE*
+ Enqueue **value** into the queue.
+
+ **bpftool map dequeue** *MAP*
+ Dequeue and print **value** from the queue.
+
**bpftool map help**
Print short help message.
@@ -115,7 +135,7 @@ OPTIONS
-h, --help
Print short generic help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -236,5 +256,7 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-prog**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
- **bpftool-perf**\ (8)
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index ed87c9b619ad..934580850f42 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -55,7 +55,7 @@ OPTIONS
-h, --help
Print short generic help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -142,4 +142,6 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
- **bpftool-perf**\ (8)
+ **bpftool-feature**\ (8),
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index f4c5e5538bb8..0c7576523a21 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -43,7 +43,7 @@ OPTIONS
-h, --help
Print short generic help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -84,4 +84,6 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
- **bpftool-net**\ (8)
+ **bpftool-feature**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 58c8369b77dd..018ecef8dc13 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -18,14 +18,14 @@ SYNOPSIS
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
| **loadall** | **help** }
-MAP COMMANDS
+PROG COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
+| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
@@ -39,10 +39,11 @@ MAP COMMANDS
| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
-| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
+| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
+| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl**
| }
| *ATTACH_TYPE* := {
-| **msg_verdict** | **skb_verdict** | **skb_parse** | **flow_dissector**
+| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
| }
@@ -56,6 +57,14 @@ DESCRIPTION
Output will start with program ID followed by program type and
zero or more named attributes (depending on kernel version).
+ Since Linux 5.1 the kernel can collect statistics on BPF
+ programs (such as the total time spent running the program,
+ and the number of times it was run). If available, bpftool
+ shows such statistics. However, the kernel does not collect
+ them by default, as it slightly impacts performance on each
+ program run. Activation or deactivation of the feature is
+ performed via the **kernel.bpf_stats_enabled** sysctl knob.
+
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
Dump eBPF instructions of the program from the kernel. By
default, eBPF will be disassembled and printed to standard
@@ -144,7 +153,7 @@ OPTIONS
-h, --help
Print short generic help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -171,7 +180,7 @@ EXAMPLES
::
- 10: xdp name some_prog tag 005a3d2123620c8b gpl
+ 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10
loaded_at 2017-09-29T20:11:00+0000 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
@@ -184,6 +193,8 @@ EXAMPLES
"type": "xdp",
"tag": "005a3d2123620c8b",
"gpl_compatible": true,
+ "run_time_ns": 81632,
+ "run_cnt": 10,
"loaded_at": 1506715860,
"uid": 0,
"bytes_xlated": 528,
@@ -258,5 +269,7 @@ SEE ALSO
**bpftool**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
- **bpftool-perf**\ (8)
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
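
The new run_time_ns/run_cnt values shown above come from struct bpf_prog_info, which bpftool reads with bpf_obj_get_info_by_fd(). A minimal libbpf-based sketch along the same lines, assuming a kernel >= 5.1 with **kernel.bpf_stats_enabled** set to 1 (header paths may differ between libbpf versions)::

    #include <stdio.h>
    #include <unistd.h>
    #include <bpf/bpf.h>	/* bpf_prog_get_fd_by_id, bpf_obj_get_info_by_fd */

    static int print_prog_stats(__u32 prog_id)
    {
    	struct bpf_prog_info info = {};
    	__u32 len = sizeof(info);
    	int fd, err;

    	fd = bpf_prog_get_fd_by_id(prog_id);
    	if (fd < 0)
    		return fd;

    	err = bpf_obj_get_info_by_fd(fd, &info, &len);
    	if (!err)
    		printf("run_time_ns %llu run_cnt %llu\n",
    		       (unsigned long long)info.run_time_ns,
    		       (unsigned long long)info.run_cnt);
    	close(fd);
    	return err;
    }
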
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index e1677e81ed59..3e562d7fd56f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,7 +16,7 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** }
+ *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
@@ -34,6 +34,8 @@ SYNOPSIS
*NET-COMMANDS* := { **show** | **list** | **help** }
+ *FEATURE-COMMANDS* := { **probe** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
@@ -47,7 +49,7 @@ OPTIONS
-h, --help
Print short help message (similar to **bpftool help**).
- -v, --version
+ -V, --version
Print version number (similar to **bpftool version**).
-j, --json
@@ -72,5 +74,7 @@ SEE ALSO
**bpftool-prog**\ (8),
**bpftool-map**\ (8),
**bpftool-cgroup**\ (8),
+ **bpftool-feature**\ (8),
**bpftool-net**\ (8),
- **bpftool-perf**\ (8)
+ **bpftool-perf**\ (8),
+ **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 4ad1f0894d53..a7afea4dec47 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include
include ../../scripts/utilities.mak
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index e4e4fab1b8c7..4300adf6e5ab 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -50,14 +50,15 @@ _bpftool_get_map_ids()
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-_bpftool_get_perf_map_ids()
+# Takes map type and adds matching map ids to the list of suggestions.
+_bpftool_get_map_ids_for_type()
{
+ local type="$1"
COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
- command grep -C2 perf_event_array | \
+ command grep -C2 "$type" | \
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
-
_bpftool_get_prog_ids()
{
COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
@@ -99,15 +100,25 @@ _sysfs_get_netdevs()
"$cur" ) )
}
-# For bpftool map update: retrieve type of the map to update.
-_bpftool_map_update_map_type()
+# Retrieve type of the map that we are operating on.
+_bpftool_map_guess_map_type()
{
local keyword ref
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- if [[ ${words[$((idx-2))]} == "update" ]]; then
- keyword=${words[$((idx-1))]}
- ref=${words[$((idx))]}
- fi
+ case "${words[$((idx-2))]}" in
+ lookup|update)
+ keyword=${words[$((idx-1))]}
+ ref=${words[$((idx))]}
+ ;;
+ push)
+ printf "stack"
+ return 0
+ ;;
+ enqueue)
+ printf "queue"
+ return 0
+ ;;
+ esac
done
[[ -z $ref ]] && return 0
@@ -119,6 +130,8 @@ _bpftool_map_update_map_type()
_bpftool_map_update_get_id()
{
+ local command="$1"
+
# Is it the map to update, or a map to insert into the map to update?
# Search for "value" keyword.
local idx value
@@ -128,11 +141,24 @@ _bpftool_map_update_get_id()
break
fi
done
- [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ fi
# Id to complete is for a value. It can be either prog id or map id. This
# depends on the type of the map to update.
- local type=$(_bpftool_map_update_map_type)
+ local type=$(_bpftool_map_guess_map_type)
case $type in
array_of_maps|hash_of_maps)
_bpftool_get_map_ids
@@ -191,6 +217,7 @@ _bpftool()
done
cur=${words[cword]}
prev=${words[cword - 1]}
+ pprev=${words[cword - 2]}
local object=${words[1]} command=${words[2]}
@@ -246,17 +273,17 @@ _bpftool()
"$cur" ) )
return 0
;;
- *)
- _bpftool_once_attr 'file'
- if _bpftool_search_list 'xlated'; then
- COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
- "$cur" ) )
- else
- COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
- "$cur" ) )
- fi
- return 0
- ;;
+ *)
+ _bpftool_once_attr 'file'
+ if _bpftool_search_list 'xlated'; then
+ COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
+ "$cur" ) )
+ else
+ COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
+ "$cur" ) )
+ fi
+ return 0
+ ;;
esac
;;
pin)
@@ -285,8 +312,8 @@ _bpftool()
return 0
;;
5)
- COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \
- skb_parse flow_dissector' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'msg_verdict stream_verdict \
+ stream_parser flow_dissector' -- "$cur" ) )
return 0
;;
6)
@@ -344,7 +371,9 @@ _bpftool()
lirc_mode2 cgroup/bind4 cgroup/bind6 \
cgroup/connect4 cgroup/connect6 \
cgroup/sendmsg4 cgroup/sendmsg6 \
- cgroup/post_bind4 cgroup/post_bind6" -- \
+ cgroup/recvmsg4 cgroup/recvmsg6 \
+ cgroup/post_bind4 cgroup/post_bind6 \
+ cgroup/sysctl" -- \
"$cur" ) )
return 0
;;
@@ -382,14 +411,28 @@ _bpftool()
map)
local MAP_TYPE='id pinned'
case $command in
- show|list|dump)
+ show|list|dump|peek|pop|dequeue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_get_map_ids
+ case "$command" in
+ peek)
+ _bpftool_get_map_ids_for_type stack
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
return 0
;;
*)
@@ -447,19 +490,25 @@ _bpftool()
COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ return 0
+ ;;
+ esac
+
_bpftool_once_attr 'key'
return 0
;;
esac
;;
- update)
+ update|push|enqueue)
case $prev in
$command)
COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
return 0
;;
id)
- _bpftool_map_update_get_id
+ _bpftool_map_update_get_id $command
return 0
;;
key)
@@ -468,7 +517,7 @@ _bpftool()
value)
# We can have bytes, or references to a prog or a
# map, depending on the type of the map to update.
- case $(_bpftool_map_update_map_type) in
+ case "$(_bpftool_map_guess_map_type)" in
array_of_maps|hash_of_maps)
local MAP_TYPE='id pinned'
COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
@@ -490,6 +539,13 @@ _bpftool()
return 0
;;
*)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ _bpftool_once_attr 'value'
+ return 0;
+ ;;
+ esac
+
_bpftool_once_attr 'key'
local UPDATE_FLAGS='any exist noexist'
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
@@ -508,6 +564,7 @@ _bpftool()
return 0
fi
done
+
return 0
;;
esac
@@ -527,7 +584,7 @@ _bpftool()
return 0
;;
id)
- _bpftool_get_perf_map_ids
+ _bpftool_get_map_ids_for_type perf_event_array
return 0
;;
cpu)
@@ -546,11 +603,57 @@ _bpftool()
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'delete dump getnext help \
- lookup pin event_pipe show list update create' -- \
+ lookup pin event_pipe show list update create \
+ peek push enqueue pop dequeue' -- \
"$cur" ) )
;;
esac
;;
+ btf)
+ local PROG_TYPE='id pinned tag'
+ local MAP_TYPE='id pinned'
+ case $command in
+ dump)
+ case $prev in
+ $command)
+ COMPREPLY+=( $( compgen -W "id map prog file" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ prog)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ case $pprev in
+ prog)
+ _bpftool_get_prog_ids
+ ;;
+ map)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ if [[ $cword == 6 ]] && [[ ${words[3]} == "map" ]]; then
+ COMPREPLY+=( $( compgen -W 'key value kv all' -- \
+ "$cur" ) )
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'dump help' -- "$cur" ) )
+ ;;
+ esac
+ ;;
cgroup)
case $command in
show|list)
@@ -564,7 +667,7 @@ _bpftool()
attach|detach)
local ATTACH_TYPES='ingress egress sock_create sock_ops \
device bind4 bind6 post_bind4 post_bind6 connect4 \
- connect6 sendmsg4 sendmsg6'
+ connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl'
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag'
case $prev in
@@ -574,7 +677,7 @@ _bpftool()
;;
ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
post_bind4|post_bind6|connect4|connect6|sendmsg4|\
- sendmsg6)
+ sendmsg6|recvmsg4|recvmsg6|sysctl)
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0
@@ -624,6 +727,25 @@ _bpftool()
;;
esac
;;
+ feature)
+ case $command in
+ probe)
+ [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0
+ [[ $prev == "prefix" ]] && return 0
+ if _bpftool_search_list 'macros'; then
+ COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+ else
+ COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+ fi
+ _bpftool_one_of_list 'kernel dev'
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
new file mode 100644
index 000000000000..7317438ecd9e
--- /dev/null
+++ b/tools/bpf/bpftool/btf.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Facebook */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/err.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <gelf.h>
+#include <bpf.h>
+#include <linux/btf.h>
+
+#include "btf.h"
+#include "json_writer.h"
+#include "main.h"
+
+static const char * const btf_kind_str[NR_BTF_KINDS] = {
+ [BTF_KIND_UNKN] = "UNKNOWN",
+ [BTF_KIND_INT] = "INT",
+ [BTF_KIND_PTR] = "PTR",
+ [BTF_KIND_ARRAY] = "ARRAY",
+ [BTF_KIND_STRUCT] = "STRUCT",
+ [BTF_KIND_UNION] = "UNION",
+ [BTF_KIND_ENUM] = "ENUM",
+ [BTF_KIND_FWD] = "FWD",
+ [BTF_KIND_TYPEDEF] = "TYPEDEF",
+ [BTF_KIND_VOLATILE] = "VOLATILE",
+ [BTF_KIND_CONST] = "CONST",
+ [BTF_KIND_RESTRICT] = "RESTRICT",
+ [BTF_KIND_FUNC] = "FUNC",
+ [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
+ [BTF_KIND_VAR] = "VAR",
+ [BTF_KIND_DATASEC] = "DATASEC",
+};
+
+static const char *btf_int_enc_str(__u8 encoding)
+{
+ switch (encoding) {
+ case 0:
+ return "(none)";
+ case BTF_INT_SIGNED:
+ return "SIGNED";
+ case BTF_INT_CHAR:
+ return "CHAR";
+ case BTF_INT_BOOL:
+ return "BOOL";
+ default:
+ return "UNKN";
+ }
+}
+
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+ switch (linkage) {
+ case BTF_VAR_STATIC:
+ return "static";
+ case BTF_VAR_GLOBAL_ALLOCATED:
+ return "global-alloc";
+ default:
+ return "(unknown)";
+ }
+}
+
+static const char *btf_str(const struct btf *btf, __u32 off)
+{
+ if (!off)
+ return "(anon)";
+ return btf__name_by_offset(btf, off) ? : "(invalid)";
+}
+
+static int dump_btf_type(const struct btf *btf, __u32 id,
+ const struct btf_type *t)
+{
+ json_writer_t *w = json_wtr;
+ int kind, safe_kind;
+
+ kind = BTF_INFO_KIND(t->info);
+ safe_kind = kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_uint_field(w, "id", id);
+ jsonw_string_field(w, "kind", btf_kind_str[safe_kind]);
+ jsonw_string_field(w, "name", btf_str(btf, t->name_off));
+ } else {
+ printf("[%u] %s '%s'", id, btf_kind_str[safe_kind],
+ btf_str(btf, t->name_off));
+ }
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT: {
+ __u32 v = *(__u32 *)(t + 1);
+ const char *enc;
+
+ enc = btf_int_enc_str(BTF_INT_ENCODING(v));
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "bits_offset", BTF_INT_OFFSET(v));
+ jsonw_uint_field(w, "nr_bits", BTF_INT_BITS(v));
+ jsonw_string_field(w, "encoding", enc);
+ } else {
+ printf(" size=%u bits_offset=%u nr_bits=%u encoding=%s",
+ t->size, BTF_INT_OFFSET(v), BTF_INT_BITS(v),
+ enc);
+ }
+ break;
+ }
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ if (json_output)
+ jsonw_uint_field(w, "type_id", t->type);
+ else
+ printf(" type_id=%u", t->type);
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *arr = (const void *)(t + 1);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", arr->type);
+ jsonw_uint_field(w, "index_type_id", arr->index_type);
+ jsonw_uint_field(w, "nr_elems", arr->nelems);
+ } else {
+ printf(" type_id=%u index_type_id=%u nr_elems=%u",
+ arr->type, arr->index_type, arr->nelems);
+ }
+ break;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "members");
+ jsonw_start_array(w);
+ } else {
+ printf(" size=%u vlen=%u", t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, m++) {
+ const char *name = btf_str(btf, m->name_off);
+ __u32 bit_off, bit_sz;
+
+ if (BTF_INFO_KFLAG(t->info)) {
+ bit_off = BTF_MEMBER_BIT_OFFSET(m->offset);
+ bit_sz = BTF_MEMBER_BITFIELD_SIZE(m->offset);
+ } else {
+ bit_off = m->offset;
+ bit_sz = 0;
+ }
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ jsonw_uint_field(w, "type_id", m->type);
+ jsonw_uint_field(w, "bits_offset", bit_off);
+ if (bit_sz) {
+ jsonw_uint_field(w, "bitfield_size",
+ bit_sz);
+ }
+ jsonw_end_object(w);
+ } else {
+ printf("\n\t'%s' type_id=%u bits_offset=%u",
+ name, m->type, bit_off);
+ if (bit_sz)
+ printf(" bitfield_size=%u", bit_sz);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ const struct btf_enum *v = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "values");
+ jsonw_start_array(w);
+ } else {
+ printf(" size=%u vlen=%u", t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ const char *name = btf_str(btf, v->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ jsonw_uint_field(w, "val", v->val);
+ jsonw_end_object(w);
+ } else {
+ printf("\n\t'%s' val=%u", name, v->val);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_FWD: {
+ const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union"
+ : "struct";
+
+ if (json_output)
+ jsonw_string_field(w, "fwd_kind", fwd_kind);
+ else
+ printf(" fwd_kind=%s", fwd_kind);
+ break;
+ }
+ case BTF_KIND_FUNC:
+ if (json_output)
+ jsonw_uint_field(w, "type_id", t->type);
+ else
+ printf(" type_id=%u", t->type);
+ break;
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "ret_type_id", t->type);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "params");
+ jsonw_start_array(w);
+ } else {
+ printf(" ret_type_id=%u vlen=%u", t->type, vlen);
+ }
+ for (i = 0; i < vlen; i++, p++) {
+ const char *name = btf_str(btf, p->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ jsonw_uint_field(w, "type_id", p->type);
+ jsonw_end_object(w);
+ } else {
+ printf("\n\t'%s' type_id=%u", name, p->type);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_VAR: {
+ const struct btf_var *v = (const void *)(t + 1);
+ const char *linkage;
+
+ linkage = btf_var_linkage_str(v->linkage);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", t->type);
+ jsonw_string_field(w, "linkage", linkage);
+ } else {
+ printf(" type_id=%u, linkage=%s", t->type, linkage);
+ }
+ break;
+ }
+ case BTF_KIND_DATASEC: {
+ const struct btf_var_secinfo *v = (const void *)(t+1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "vars");
+ jsonw_start_array(w);
+ } else {
+ printf(" size=%u vlen=%u", t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_uint_field(w, "type_id", v->type);
+ jsonw_uint_field(w, "offset", v->offset);
+ jsonw_uint_field(w, "size", v->size);
+ jsonw_end_object(w);
+ } else {
+ printf("\n\ttype_id=%u offset=%u size=%u",
+ v->type, v->offset, v->size);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ return 0;
+}
+
+static int dump_btf_raw(const struct btf *btf,
+ __u32 *root_type_ids, int root_type_cnt)
+{
+ const struct btf_type *t;
+ int i;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "types");
+ jsonw_start_array(json_wtr);
+ }
+
+ if (root_type_cnt) {
+ for (i = 0; i < root_type_cnt; i++) {
+ t = btf__type_by_id(btf, root_type_ids[i]);
+ dump_btf_type(btf, root_type_ids[i], t);
+ }
+ } else {
+ int cnt = btf__get_nr_types(btf);
+
+ for (i = 1; i <= cnt; i++) {
+ t = btf__type_by_id(btf, i);
+ dump_btf_type(btf, i, t);
+ }
+ }
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_end_object(json_wtr);
+ }
+ return 0;
+}
+
+static bool check_btf_endianness(GElf_Ehdr *ehdr)
+{
+ static unsigned int const endian = 1;
+
+ switch (ehdr->e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ return *(unsigned char const *)&endian == 1;
+ case ELFDATA2MSB:
+ return *(unsigned char const *)&endian == 0;
+ default:
+ return 0;
+ }
+}
+
+static int btf_load_from_elf(const char *path, struct btf **btf)
+{
+ int err = -1, fd = -1, idx = 0;
+ Elf_Data *btf_data = NULL;
+ Elf_Scn *scn = NULL;
+ Elf *elf = NULL;
+ GElf_Ehdr ehdr;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ p_err("failed to init libelf for %s", path);
+ return -1;
+ }
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ p_err("failed to open %s: %s", path, strerror(errno));
+ return -1;
+ }
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+ if (!elf) {
+ p_err("failed to open %s as ELF file", path);
+ goto done;
+ }
+ if (!gelf_getehdr(elf, &ehdr)) {
+ p_err("failed to get EHDR from %s", path);
+ goto done;
+ }
+ if (!check_btf_endianness(&ehdr)) {
+ p_err("non-native ELF endianness is not supported");
+ goto done;
+ }
+ if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
+ p_err("failed to get e_shstrndx from %s\n", path);
+ goto done;
+ }
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+ char *name;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ p_err("failed to get section(%d) header from %s",
+ idx, path);
+ goto done;
+ }
+ name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
+ if (!name) {
+ p_err("failed to get section(%d) name from %s",
+ idx, path);
+ goto done;
+ }
+ if (strcmp(name, BTF_ELF_SEC) == 0) {
+ btf_data = elf_getdata(scn, 0);
+ if (!btf_data) {
+ p_err("failed to get section(%d, %s) data from %s",
+ idx, name, path);
+ goto done;
+ }
+ break;
+ }
+ }
+
+ if (!btf_data) {
+ p_err("%s ELF section not found in %s", BTF_ELF_SEC, path);
+ goto done;
+ }
+
+ *btf = btf__new(btf_data->d_buf, btf_data->d_size);
+ if (IS_ERR(*btf)) {
+ err = PTR_ERR(*btf);
+ *btf = NULL;
+ p_err("failed to load BTF data from %s: %s",
+ path, strerror(err));
+ goto done;
+ }
+
+ err = 0;
+done:
+ if (err) {
+ if (*btf) {
+ btf__free(*btf);
+ *btf = NULL;
+ }
+ }
+ if (elf)
+ elf_end(elf);
+ close(fd);
+ return err;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ struct btf *btf = NULL;
+ __u32 root_type_ids[2];
+ int root_type_cnt = 0;
+ __u32 btf_id = -1;
+ const char *src;
+ int fd = -1;
+ int err;
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+ src = GET_ARG();
+
+ if (is_prefix(src, "map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ btf_id = info.btf_id;
+ if (argc && is_prefix(*argv, "key")) {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "value")) {
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "all")) {
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "kv")) {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ NEXT_ARG();
+ } else {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ }
+ } else if (is_prefix(src, "prog")) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ fd = prog_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ goto done;
+ }
+
+ btf_id = info.btf_id;
+ } else if (is_prefix(src, "id")) {
+ char *endptr;
+
+ btf_id = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(src, "file")) {
+ err = btf_load_from_elf(*argv, &btf);
+ if (err)
+ goto done;
+ NEXT_ARG();
+ } else {
+ err = -1;
+ p_err("unrecognized BTF source specifier: '%s'", src);
+ goto done;
+ }
+
+ if (!btf) {
+ err = btf__get_from_id(btf_id, &btf);
+ if (err) {
+ p_err("get btf by id (%u): %s", btf_id, strerror(err));
+ goto done;
+ }
+ if (!btf) {
+ err = ENOENT;
+ p_err("can't find btf with ID (%u)", btf_id);
+ goto done;
+ }
+ }
+
+ dump_btf_raw(btf, root_type_ids, root_type_cnt);
+
+done:
+ close(fd);
+ btf__free(btf);
+ return err;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s btf dump BTF_SRC\n"
+ " %s btf help\n"
+ "\n"
+ " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
+ " " HELP_SPEC_MAP "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " " HELP_SPEC_OPTIONS "\n"
+ "",
+ bin_name, bin_name);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "help", do_help },
+ { "dump", do_dump },
+ { 0 }
+};
+
+int do_btf(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
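
For reference, the core of do_dump() for the "id" case boils down to a handful of libbpf calls. A trimmed stand-alone sketch using the same API and include style as btf.c above (error handling and the raw/JSON formatting are omitted)::

    #include <errno.h>
    #include <stdio.h>
    #include <bpf.h>		/* same include style as btf.c above */
    #include <linux/btf.h>
    #include "btf.h"		/* libbpf BTF API */

    static int dump_all_types(__u32 btf_id)
    {
    	const struct btf_type *t;
    	struct btf *btf = NULL;
    	__u32 i, nr;
    	int err;

    	err = btf__get_from_id(btf_id, &btf);
    	if (err || !btf)
    		return err ? err : -ENOENT;

    	nr = btf__get_nr_types(btf);
    	for (i = 1; i <= nr; i++) {
    		t = btf__type_by_id(btf, i);
    		printf("[%u] kind=%u name_off=%u\n",
    		       i, BTF_INFO_KIND(t->info), t->name_off);
    	}
    	btf__free(btf);
    	return 0;
    }
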
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 6ba5f567a9d8..8cafb9b31467 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -73,35 +73,104 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
+static void btf_int128_print(json_writer_t *jw, const void *data,
+ bool is_plain_text)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+ * The below formulas shows what upper_num and lower_num represents:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(__u64 *)data;
+ lower_num = *(__u64 *)(data + 8);
+#else
+ upper_num = *(__u64 *)(data + 8);
+ lower_num = *(__u64 *)data;
+#endif
+
+ if (is_plain_text) {
+ if (upper_num == 0)
+ jsonw_printf(jw, "0x%llx", lower_num);
+ else
+ jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
+ } else {
+ if (upper_num == 0)
+ jsonw_printf(jw, "\"0x%llx\"", lower_num);
+ else
+ jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
+ }
+}
+
+static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits,
+ u16 right_shift_bits)
+{
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out un-needed bits by shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
+ __u64 print_num[2] = {};
int bytes_to_copy;
int bits_to_copy;
- __u64 print_num;
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
- print_num = 0;
- memcpy(&print_num, data, bytes_to_copy);
+ memcpy(print_num, data, bytes_to_copy);
#if defined(__BIG_ENDIAN_BITFIELD)
left_shift_bits = bit_offset;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- left_shift_bits = 64 - bits_to_copy;
+ left_shift_bits = 128 - bits_to_copy;
#else
#error neither big nor little endian
#endif
- right_shift_bits = 64 - nr_bits;
+ right_shift_bits = 128 - nr_bits;
- print_num <<= left_shift_bits;
- print_num >>= right_shift_bits;
- if (is_plain_text)
- jsonw_printf(jw, "0x%llx", print_num);
- else
- jsonw_printf(jw, "%llu", print_num);
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(jw, print_num, is_plain_text);
}
@@ -113,7 +182,7 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
int total_bits_offset;
/* bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
*/
total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -139,6 +208,11 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
return 0;
}
+ if (nr_bits == 128) {
+ btf_int128_print(jw, data, is_plain_text);
+ return 0;
+ }
+
switch (BTF_INT_ENCODING(*int_type)) {
case 0:
if (BTF_INT_BITS(*int_type) == 64)
@@ -235,6 +309,48 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
return ret;
}
+static int btf_dumper_var(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+ int ret;
+
+ jsonw_start_object(d->jw);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+ ret = btf_dumper_do_type(d, t->type, bit_offset, data);
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_datasec(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ struct btf_var_secinfo *vsi;
+ const struct btf_type *t;
+ int ret = 0, i, vlen;
+
+ t = btf__type_by_id(d->btf, type_id);
+ if (!t)
+ return -EINVAL;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ vsi = (struct btf_var_secinfo *)(t + 1);
+
+ jsonw_start_object(d->jw);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+ jsonw_start_array(d->jw);
+ for (i = 0; i < vlen; i++) {
+ ret = btf_dumper_do_type(d, vsi[i].type, 0, data + vsi[i].offset);
+ if (ret)
+ break;
+ }
+ jsonw_end_array(d->jw);
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
__u8 bit_offset, const void *data)
{
@@ -267,6 +383,10 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_dumper_modifier(d, type_id, bit_offset, data);
+ case BTF_KIND_VAR:
+ return btf_dumper_var(d, type_id, bit_offset, data);
+ case BTF_KIND_DATASEC:
+ return btf_dumper_datasec(d, type_id, data);
default:
jsonw_printf(d->jw, "(unsupported-kind");
return -EINVAL;
@@ -303,6 +423,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
{
const struct btf_type *proto_type;
const struct btf_array *array;
+ const struct btf_var *var;
const struct btf_type *t;
if (!type_id) {
@@ -366,6 +487,18 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
if (pos == -1)
return -1;
break;
+ case BTF_KIND_VAR:
+ var = (struct btf_var *)(t + 1);
+ if (var->linkage == BTF_VAR_STATIC)
+ BTF_PRINT_ARG("static ");
+ BTF_PRINT_TYPE(t->type);
+ BTF_PRINT_ARG(" %s",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_DATASEC:
+ BTF_PRINT_ARG("section (\"%s\") ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
case BTF_KIND_UNKN:
default:
return -1;
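
The 128-bit handling above stores the number as two __u64 halves and shifts the pair as a unit. A quick self-contained check of that shift logic on a little-endian host, with values chosen so that no shift amount is 0 or 64 (matching the guards in btf_int128_shift())::

    #include <stdio.h>

    typedef unsigned long long u64;

    /* Mirrors btf_int128_shift() for the little-endian layout:
     * num[0] = low 64 bits, num[1] = high 64 bits.
     */
    static void int128_shift(u64 num[2], unsigned int left, unsigned int right)
    {
    	u64 hi = num[1], lo = num[0];

    	if (left >= 64) {
    		hi = lo << (left - 64);
    		lo = 0;
    	} else {
    		hi = (hi << left) | (lo >> (64 - left));
    		lo <<= left;
    	}
    	if (right >= 64) {
    		lo = hi >> (right - 64);
    		hi = 0;
    	} else {
    		lo = (lo >> right) | (hi << (64 - right));
    		hi >>= right;
    	}
    	num[0] = lo;
    	num[1] = hi;
    }

    int main(void)
    {
    	/* 4-bit field with value 0xb at bit offset 100 of a 128-bit word:
    	 * bits_to_copy = 104, so left = 128 - 104 = 24, right = 128 - 4 = 124.
    	 */
    	u64 v[2] = { 0, 0xbULL << 36 };

    	int128_shift(v, 24, 124);
    	printf("field = 0x%llx\n", v[0]);	/* prints 0xb */
    	return 0;
    }
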
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 31f0db41513f..3e21f994f262 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -157,6 +157,11 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
+static bool is_jmp_insn(u8 code)
+{
+ return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
static bool func_partition_bb_head(struct func_node *func)
{
struct bpf_insn *cur, *end;
@@ -170,7 +175,7 @@ static bool func_partition_bb_head(struct func_node *func)
return true;
for (; cur <= end; cur++) {
- if (BPF_CLASS(cur->code) == BPF_JMP) {
+ if (is_jmp_insn(cur->code)) {
u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
@@ -296,7 +301,7 @@ static bool func_add_bb_edges(struct func_node *func)
e->src = bb;
insn = bb->tail;
- if (BPF_CLASS(insn->code) != BPF_JMP ||
+ if (!is_jmp_insn(insn->code) ||
BPF_OP(insn->code) == BPF_EXIT) {
e->dst = bb_next(bb);
e->flags |= EDGE_FLAG_FALLTHROUGH;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 4b5c8da2a7c0..73ec8ea33fb4 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -25,7 +25,8 @@
" ATTACH_TYPE := { ingress | egress | sock_create |\n" \
" sock_ops | device | bind4 | bind6 |\n" \
" post_bind4 | post_bind6 | connect4 |\n" \
- " connect6 | sendmsg4 | sendmsg6 }"
+ " connect6 | sendmsg4 | sendmsg6 |\n" \
+ " recvmsg4 | recvmsg6 | sysctl }"
static const char * const attach_type_strings[] = {
[BPF_CGROUP_INET_INGRESS] = "ingress",
@@ -41,6 +42,9 @@ static const char * const attach_type_strings[] = {
[BPF_CGROUP_INET6_POST_BIND] = "post_bind6",
[BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
[BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
+ [BPF_CGROUP_SYSCTL] = "sysctl",
+ [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
+ [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
@@ -248,6 +252,13 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+ if (errno == EINVAL)
+ /* Last attach type does not support query.
+ * Do not report an error for this, especially because batch
+ * mode would stop processing commands.
+ */
+ errno = 0;
+
if (json_output) {
jsonw_end_array(json_wtr);
jsonw_end_object(json_wtr);
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
new file mode 100644
index 000000000000..d672d9086fff
--- /dev/null
+++ b/tools/bpf/bpftool/feature.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+
+#include <bpf.h>
+#include <libbpf.h>
+
+#include "main.h"
+
+#ifndef PROC_SUPER_MAGIC
+# define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+enum probe_component {
+ COMPONENT_UNSPEC,
+ COMPONENT_KERNEL,
+ COMPONENT_DEVICE,
+};
+
+#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
+static const char * const helper_name[] = {
+ __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
+};
+
+#undef BPF_HELPER_MAKE_ENTRY
+
+/* Miscellaneous utility functions */
+
+static bool check_procfs(void)
+{
+ struct statfs st_fs;
+
+ if (statfs("/proc", &st_fs) < 0)
+ return false;
+ if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
+ return false;
+
+ return true;
+}
+
+static void uppercase(char *str, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len && str[i] != '\0'; i++)
+ str[i] = toupper(str[i]);
+}
+
+/* Printing utility functions */
+
+static void
+print_bool_feature(const char *feat_name, const char *plain_name,
+ const char *define_name, bool res, const char *define_prefix)
+{
+ if (json_output)
+ jsonw_bool_field(json_wtr, feat_name, res);
+ else if (define_prefix)
+ printf("#define %s%sHAVE_%s\n", define_prefix,
+ res ? "" : "NO_", define_name);
+ else
+ printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
+}
+
+static void print_kernel_option(const char *name, const char *value)
+{
+ char *endptr;
+ int res;
+
+ /* No support for C-style output */
+
+ if (json_output) {
+ if (!value) {
+ jsonw_null_field(json_wtr, name);
+ return;
+ }
+ errno = 0;
+ res = strtol(value, &endptr, 0);
+ if (!errno && *endptr == '\n')
+ jsonw_int_field(json_wtr, name, res);
+ else
+ jsonw_string_field(json_wtr, name, value);
+ } else {
+ if (value)
+ printf("%s is set to %s\n", name, value);
+ else
+ printf("%s is not set\n", name);
+ }
+}
+
+static void
+print_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment, const char *define_prefix)
+{
+ if (json_output) {
+ jsonw_name(json_wtr, json_title);
+ jsonw_start_object(json_wtr);
+ } else if (define_prefix) {
+ printf("%s\n", define_comment);
+ } else {
+ printf("%s\n", plain_title);
+ }
+}
+
+static void
+print_end_then_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment,
+ const char *define_prefix)
+{
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ print_start_section(json_title, plain_title, define_comment,
+ define_prefix);
+}
+
+/* Probing functions */
+
+static int read_procfs(const char *path)
+{
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+ int res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+ return -1;
+
+ res = getline(&line, &len, fd);
+ fclose(fd);
+ if (res < 0)
+ return -1;
+
+ errno = 0;
+ res = strtol(line, &endptr, 10);
+ if (errno || *line == '\0' || *endptr != '\n')
+ res = -1;
+ free(line);
+
+ return res;
+}
+
+static void probe_unprivileged_disabled(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("bpf() syscall for unprivileged users is enabled\n");
+ break;
+ case 1:
+ printf("bpf() syscall restricted to privileged users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+ printf("bpf() syscall restriction has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_enable(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_enable", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler is enabled\n");
+ break;
+ case 2:
+ printf("JIT compiler is enabled with debugging traces in kernel logs\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+ printf("JIT-compiler status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_harden(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_harden", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler hardening is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler hardening is enabled for unprivileged users\n");
+ break;
+ case 2:
+ printf("JIT compiler hardening is enabled for all users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+ printf("JIT hardening status has unknown value %d\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_kallsyms(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler kallsyms exports are disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler kallsyms exports are enabled for root\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+ printf("JIT kallsyms exports status has unknown value %d\n", res);
+ }
+ }
+}
+
+static void probe_jit_limit(void)
+{
+ int res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_limit", res);
+ } else {
+ switch (res) {
+ case -1:
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+ printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+ }
+ }
+}
+
+static char *get_kernel_config_option(FILE *fd, const char *option)
+{
+ size_t line_n = 0, optlen = strlen(option);
+ char *res, *strval, *line = NULL;
+ ssize_t n;
+
+ rewind(fd);
+ while ((n = getline(&line, &line_n, fd)) > 0) {
+ if (strncmp(line, option, optlen))
+ continue;
+ /* Check we have at least '=', value, and '\n' */
+ if (strlen(line) < optlen + 3)
+ continue;
+ if (*(line + optlen) != '=')
+ continue;
+
+ /* Trim ending '\n' */
+ line[strlen(line) - 1] = '\0';
+
+ /* Copy and return config option value */
+ strval = line + optlen + 1;
+ res = strdup(strval);
+ free(line);
+ return res;
+ }
+ free(line);
+
+ return NULL;
+}
+
+static void probe_kernel_image_config(void)
+{
+ static const char * const options[] = {
+ /* Enable BPF */
+ "CONFIG_BPF",
+ /* Enable bpf() syscall */
+ "CONFIG_BPF_SYSCALL",
+ /* Does selected architecture support eBPF JIT compiler */
+ "CONFIG_HAVE_EBPF_JIT",
+ /* Compile eBPF JIT compiler */
+ "CONFIG_BPF_JIT",
+ /* Avoid compiling eBPF interpreter (use JIT only) */
+ "CONFIG_BPF_JIT_ALWAYS_ON",
+
+ /* cgroups */
+ "CONFIG_CGROUPS",
+ /* BPF programs attached to cgroups */
+ "CONFIG_CGROUP_BPF",
+ /* bpf_get_cgroup_classid() helper */
+ "CONFIG_CGROUP_NET_CLASSID",
+ /* bpf_skb_{,ancestor_}cgroup_id() helpers */
+ "CONFIG_SOCK_CGROUP_DATA",
+
+ /* Tracing: attach BPF to kprobes, tracepoints, etc. */
+ "CONFIG_BPF_EVENTS",
+ /* Kprobes */
+ "CONFIG_KPROBE_EVENTS",
+ /* Uprobes */
+ "CONFIG_UPROBE_EVENTS",
+ /* Tracepoints */
+ "CONFIG_TRACING",
+ /* Syscall tracepoints */
+ "CONFIG_FTRACE_SYSCALLS",
+ /* bpf_override_return() helper support for selected arch */
+ "CONFIG_FUNCTION_ERROR_INJECTION",
+ /* bpf_override_return() helper */
+ "CONFIG_BPF_KPROBE_OVERRIDE",
+
+ /* Network */
+ "CONFIG_NET",
+ /* AF_XDP sockets */
+ "CONFIG_XDP_SOCKETS",
+ /* BPF_PROG_TYPE_LWT_* and related helpers */
+ "CONFIG_LWTUNNEL_BPF",
+ /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
+ "CONFIG_NET_ACT_BPF",
+ /* BPF_PROG_TYPE_SCHED_CLS, TC filters */
+ "CONFIG_NET_CLS_BPF",
+ /* TC clsact qdisc */
+ "CONFIG_NET_CLS_ACT",
+ /* Ingress filtering with TC */
+ "CONFIG_NET_SCH_INGRESS",
+ /* bpf_skb_get_xfrm_state() helper */
+ "CONFIG_XFRM",
+ /* bpf_get_route_realm() helper */
+ "CONFIG_IP_ROUTE_CLASSID",
+ /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
+ "CONFIG_IPV6_SEG6_BPF",
+ /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
+ "CONFIG_BPF_LIRC_MODE2",
+ /* BPF stream parser and BPF socket maps */
+ "CONFIG_BPF_STREAM_PARSER",
+ /* xt_bpf module for passing BPF programs to netfilter */
+ "CONFIG_NETFILTER_XT_MATCH_BPF",
+ /* bpfilter back-end for iptables */
+ "CONFIG_BPFILTER",
+ /* bpfilter module with "user mode helper" */
+ "CONFIG_BPFILTER_UMH",
+
+ /* test_bpf module for BPF tests */
+ "CONFIG_TEST_BPF",
+ };
+ char *value, *buf = NULL;
+ struct utsname utsn;
+ char path[PATH_MAX];
+ size_t i, n;
+ ssize_t ret;
+ FILE *fd;
+
+ if (uname(&utsn))
+ goto no_config;
+
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ fd = fopen(path, "r");
+ if (!fd && errno == ENOENT) {
+ /* Some distributions put the config file at /proc/config, give
+ * it a try.
+ * Sometimes it is also at /proc/config.gz but we do not try
+ * this one for now, it would require linking against libz.
+ */
+ fd = fopen("/proc/config", "r");
+ }
+ if (!fd) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ goto no_config;
+ }
+ /* Sanity checks */
+ ret = getline(&buf, &n, fd);
+ ret = getline(&buf, &n, fd);
+ if (!buf || !ret) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ free(buf);
+ goto no_config;
+ }
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ free(buf);
+ goto no_config;
+ }
+ free(buf);
+
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ value = get_kernel_config_option(fd, options[i]);
+ print_kernel_option(options[i], value);
+ free(value);
+ }
+ fclose(fd);
+ return;
+
+no_config:
+ for (i = 0; i < ARRAY_SIZE(options); i++)
+ print_kernel_option(options[i], NULL);
+}
+
+static bool probe_bpf_syscall(const char *define_prefix)
+{
+ bool res;
+
+ bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ res = (errno != ENOSYS);
+
+ print_bool_feature("have_bpf_syscall",
+ "bpf() syscall",
+ "BPF_SYSCALL",
+ res, define_prefix);
+
+ return res;
+}
+
+static void
+probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF program_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex)
+ /* Only test offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = bpf_probe_prog_type(prog_type, ifindex);
+
+ supported_types[prog_type] |= res;
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(prog_type_name[prog_type]) > maxlen) {
+ p_info("program type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
+ sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
+ __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF map_type ";
+ size_t maxlen;
+ bool res;
+
+ res = bpf_probe_map_type(map_type, ifindex);
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_name[map_type]) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
+ sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static void
+probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
+{
+ const char *ptype_name = prog_type_name[prog_type];
+ char feat_name[128];
+ unsigned int id;
+ bool res;
+
+ if (ifindex)
+ /* Only test helpers for offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ if (json_output) {
+ sprintf(feat_name, "%s_available_helpers", ptype_name);
+ jsonw_name(json_wtr, feat_name);
+ jsonw_start_array(json_wtr);
+ } else if (!define_prefix) {
+ printf("eBPF helpers supported for program type %s:",
+ ptype_name);
+ }
+
+ for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
+ if (!supported_type)
+ res = false;
+ else
+ res = bpf_probe_helper(id, prog_type, ifindex);
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ else if (!define_prefix)
+ printf("\n");
+}
+
+static int do_probe(int argc, char **argv)
+{
+ enum probe_component target = COMPONENT_UNSPEC;
+ const char *define_prefix = NULL;
+ bool supported_types[128] = {};
+ __u32 ifindex = 0;
+ unsigned int i;
+ char *ifname;
+
+ /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
+ * Let's approximate, and restrict usage to root user only.
+ */
+ if (geteuid()) {
+ p_err("please run this command as root user");
+ return -1;
+ }
+
+ set_max_rlimit();
+
+ while (argc) {
+ if (is_prefix(*argv, "kernel")) {
+ if (target != COMPONENT_UNSPEC) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ target = COMPONENT_KERNEL;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (target != COMPONENT_UNSPEC || ifindex) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ if (!REQ_ARGS(1))
+ return -1;
+
+ target = COMPONENT_DEVICE;
+ ifname = GET_ARG();
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s", ifname,
+ strerror(errno));
+ return -1;
+ }
+ } else if (is_prefix(*argv, "macros") && !define_prefix) {
+ define_prefix = "";
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "prefix")) {
+ if (!define_prefix) {
+ p_err("'prefix' argument can only be use after 'macros'");
+ return -1;
+ }
+ if (strcmp(define_prefix, "")) {
+ p_err("'prefix' already defined");
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+ define_prefix = GET_ARG();
+ } else {
+ p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ if (json_output) {
+ define_prefix = NULL;
+ jsonw_start_object(json_wtr);
+ }
+
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ if (define_prefix)
+ break;
+
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ NULL, /* define_comment never used here */
+ NULL); /* define_prefix always NULL here */
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ probe_kernel_image_config();
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+ break;
+ default:
+ break;
+ }
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+
+ if (!probe_bpf_syscall(define_prefix))
+ /* bpf() syscall unavailable, don't probe other BPF features */
+ goto exit_close_json;
+
+ print_end_then_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_prog_type(i, supported_types, define_prefix, ifindex);
+
+ print_end_then_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
+ probe_map_type(i, define_prefix, ifindex);
+
+ print_end_then_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+ probe_helpers_for_progtype(i, supported_types[i],
+ define_prefix, ifindex);
+
+exit_close_json:
+ if (json_output) {
+ /* End current "section" of probes */
+ jsonw_end_object(json_wtr);
+ /* End root object */
+ jsonw_end_object(json_wtr);
+ }
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n"
+ " %s %s help\n"
+ "\n"
+ " COMPONENT := { kernel | dev NAME }\n"
+ "",
+ bin_name, argv[-2], bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "probe", do_probe },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_feature(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
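
The "macros" mode above turns each probe into a #define, and the final printf documents a HAVE_PROG_TYPE_HELPER() convenience macro built on top of them; because each probed helper expands to 0 or 1, it can be used directly in #if conditions. A minimal sketch of how that output could be consumed, assuming it was captured with something like "bpftool feature probe macros prefix BPFTOOL_ > bpftool_probes.h" (the prefix and file name are illustrative, not part of the patch):

#include "bpftool_probes.h"	/* hypothetical: captured output of the probe run */

void choose_xdp_design(void)
{
#if BPFTOOL_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
	/* bpf_redirect() is usable from XDP programs on this kernel */
#else
	/* fall back to a design that does not rely on bpf_redirect() */
#endif
}
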
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index f44a1c2c4ea0..1ac1fc520e6a 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -56,7 +56,7 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net }\n"
+ " OBJECT := { prog | map | cgroup | perf | net | feature | btf }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, bin_name, bin_name);
@@ -187,6 +187,8 @@ static const struct cmd cmds[] = {
{ "cgroup", do_cgroup },
{ "perf", do_perf },
{ "net", do_net },
+ { "feature", do_feature },
+ { "btf", do_btf },
{ "version", do_version },
{ 0 }
};
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 052c91d4dc55..3d63feb7f852 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -73,8 +73,12 @@ static const char * const prog_type_name[] = {
[BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
[BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
+ [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
};
+extern const char * const map_type_name[];
+extern const size_t map_type_name_size;
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
@@ -145,6 +149,8 @@ int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
int do_tracelog(int argc, char **arg);
+int do_feature(int argc, char **argv);
+int do_btf(int argc, char **argv);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 1ef1ee2280a2..5da5a7311f13 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -21,7 +21,7 @@
#include "json_writer.h"
#include "main.h"
-static const char * const map_type_name[] = {
+const char * const map_type_name[] = {
[BPF_MAP_TYPE_UNSPEC] = "unspec",
[BPF_MAP_TYPE_HASH] = "hash",
[BPF_MAP_TYPE_ARRAY] = "array",
@@ -46,8 +46,11 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
[BPF_MAP_TYPE_QUEUE] = "queue",
[BPF_MAP_TYPE_STACK] = "stack",
+ [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
};
+const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+
static bool map_is_per_cpu(__u32 type)
{
return type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -151,11 +154,13 @@ static int do_dump_btf(const struct btf_dumper *d,
/* start of key-value pair */
jsonw_start_object(d->jw);
- jsonw_name(d->jw, "key");
+ if (map_info->btf_key_type_id) {
+ jsonw_name(d->jw, "key");
- ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
- if (ret)
- goto err_end_obj;
+ ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+ if (ret)
+ goto err_end_obj;
+ }
if (!map_is_per_cpu(map_info->type)) {
jsonw_name(d->jw, "value");
@@ -257,20 +262,20 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
}
static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
- const char *value)
+ const char *error_msg)
{
- int value_size = strlen(value);
+ int msg_size = strlen(error_msg);
bool single_line, break_names;
- break_names = info->key_size > 16 || value_size > 16;
- single_line = info->key_size + value_size <= 24 && !break_names;
+ break_names = info->key_size > 16 || msg_size > 16;
+ single_line = info->key_size + msg_size <= 24 && !break_names;
printf("key:%c", break_names ? '\n' : ' ');
fprint_hex(stdout, key, info->key_size, " ");
printf(single_line ? " " : "\n");
- printf("value:%c%s", break_names ? '\n' : ' ', value);
+ printf("value:%c%s", break_names ? '\n' : ' ', error_msg);
printf("\n");
}
@@ -285,16 +290,17 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
single_line = info->key_size + info->value_size <= 24 &&
!break_names;
- printf("key:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, key, info->key_size, " ");
+ if (info->key_size) {
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
- printf(single_line ? " " : "\n");
+ printf(single_line ? " " : "\n");
+ }
- printf("value:%c", break_names ? '\n' : ' ');
- if (value)
+ if (info->value_size) {
+ printf("value:%c", break_names ? '\n' : ' ');
fprint_hex(stdout, value, info->value_size, " ");
- else
- printf("<no entry>");
+ }
printf("\n");
} else {
@@ -303,18 +309,19 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
n = get_possible_cpus();
step = round_up(info->value_size, 8);
- printf("key:\n");
- fprint_hex(stdout, key, info->key_size, " ");
- printf("\n");
- for (i = 0; i < n; i++) {
- printf("value (CPU %02d):%c",
- i, info->value_size > 16 ? '\n' : ' ');
- if (value)
+ if (info->key_size) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info->key_size, " ");
+ printf("\n");
+ }
+ if (info->value_size) {
+ for (i = 0; i < n; i++) {
+ printf("value (CPU %02d):%c",
+ i, info->value_size > 16 ? '\n' : ' ');
fprint_hex(stdout, value + i * step,
info->value_size, " ");
- else
- printf("<no entry>");
- printf("\n");
+ printf("\n");
+ }
}
}
}
@@ -429,6 +436,9 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
p_err("not enough value arguments for map of progs");
return -1;
}
+ if (is_prefix(*argv, "id"))
+ p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
+ " by some process or pinned otherwise update will be lost");
fd = prog_parse_fd(&argc, &argv);
if (fd < 0)
@@ -522,6 +532,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
}
close(fd);
+ if (info->btf_id)
+ jsonw_int_field(json_wtr, "btf_id", info->btf_id);
+
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
@@ -588,15 +601,19 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
}
close(fd);
- printf("\n");
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
hash_for_each_possible(map_table.table, obj, hash, info->id) {
if (obj->id == info->id)
- printf("\tpinned %s\n", obj->path);
+ printf("\n\tpinned %s", obj->path);
}
}
+
+ if (info->btf_id)
+ printf("\n\tbtf_id %d", info->btf_id);
+
+ printf("\n");
return 0;
}
@@ -699,18 +716,25 @@ static int dump_map_elem(int fd, void *key, void *value,
return 0;
if (json_output) {
+ jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "key");
print_hex_data_json(key, map_info->key_size);
jsonw_name(json_wtr, "value");
jsonw_start_object(json_wtr);
jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
jsonw_end_object(json_wtr);
+ jsonw_end_object(json_wtr);
} else {
- if (errno == ENOENT)
- print_entry_plain(map_info, key, NULL);
- else
- print_entry_error(map_info, key,
- strerror(lookup_errno));
+ const char *msg = NULL;
+
+ if (lookup_errno == ENOENT)
+ msg = "<no entry>";
+ else if (lookup_errno == ENOSPC &&
+ map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ msg = "<cannot read>";
+
+ print_entry_error(map_info, key,
+ msg ? : strerror(lookup_errno));
}
return 0;
@@ -764,6 +788,10 @@ static int do_dump(int argc, char **argv)
}
}
+ if (info.type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
+ info.value_size != 8)
+ p_info("Warning: cannot read values from %s map with value_size != 8",
+ map_type_name[info.type]);
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
@@ -794,6 +822,32 @@ exit_free:
return err;
}
+static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
+{
+ *key = NULL;
+ *value = NULL;
+
+ if (info->key_size) {
+ *key = malloc(info->key_size);
+ if (!*key) {
+ p_err("key mem alloc failed");
+ return -1;
+ }
+ }
+
+ if (info->value_size) {
+ *value = alloc_value(info);
+ if (!*value) {
+ p_err("value mem alloc failed");
+ free(*key);
+ *key = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static int do_update(int argc, char **argv)
{
struct bpf_map_info info = {};
@@ -810,13 +864,9 @@ static int do_update(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, value, info.key_size,
info.value_size, &flags, &value_fd);
@@ -841,12 +891,51 @@ exit_free:
return err;
}
+static void print_key_value(struct bpf_map_info *info, void *key,
+ void *value)
+{
+ json_writer_t *btf_wtr;
+ struct btf *btf = NULL;
+ int err;
+
+ err = btf__get_from_id(info->btf_id, &btf);
+ if (err) {
+ p_err("failed to get btf");
+ return;
+ }
+
+ if (json_output) {
+ print_entry_json(info, key, value, btf);
+ } else if (btf) {
+ /* if here json_wtr wouldn't have been initialised,
+ * so let's create separate writer for btf
+ */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ print_entry_plain(info, key, value);
+ } else {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, info, key, value);
+ jsonw_destroy(&btf_wtr);
+ }
+ } else {
+ print_entry_plain(info, key, value);
+ }
+ btf__free(btf);
+}
+
static int do_lookup(int argc, char **argv)
{
struct bpf_map_info info = {};
__u32 len = sizeof(info);
- json_writer_t *btf_wtr;
- struct btf *btf = NULL;
void *key, *value;
int err;
int fd;
@@ -858,13 +947,9 @@ static int do_lookup(int argc, char **argv)
if (fd < 0)
return -1;
- key = malloc(info.key_size);
- value = alloc_value(&info);
- if (!key || !value) {
- p_err("mem alloc failed");
- err = -1;
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
goto exit_free;
- }
err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
if (err)
@@ -888,43 +973,12 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
- err = btf__get_from_id(info.btf_id, &btf);
- if (err) {
- p_err("failed to get btf");
- goto exit_free;
- }
-
- if (json_output) {
- print_entry_json(&info, key, value, btf);
- } else if (btf) {
- /* if here json_wtr wouldn't have been initialised,
- * so let's create separate writer for btf
- */
- btf_wtr = get_btf_writer();
- if (!btf_wtr) {
- p_info("failed to create json writer for btf. falling back to plain output");
- btf__free(btf);
- btf = NULL;
- print_entry_plain(&info, key, value);
- } else {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, &info, key, value);
- jsonw_destroy(&btf_wtr);
- }
- } else {
- print_entry_plain(&info, key, value);
- }
+ print_key_value(&info, key, value);
exit_free:
free(key);
free(value);
close(fd);
- btf__free(btf);
return err;
}
@@ -1111,6 +1165,9 @@ static int do_create(int argc, char **argv)
return -1;
}
NEXT_ARG();
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
}
}
@@ -1137,6 +1194,49 @@ static int do_create(int argc, char **argv)
return 0;
}
+static int do_pop_dequeue(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_lookup_and_delete_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ printf("Error: empty map\n");
+ } else {
+ p_err("pop failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1150,12 +1250,17 @@ static int do_help(int argc, char **argv)
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
" [dev NAME]\n"
" %s %s dump MAP\n"
- " %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n"
- " %s %s lookup MAP key DATA\n"
+ " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %s %s lookup MAP [key DATA]\n"
" %s %s getnext MAP [key DATA]\n"
" %s %s delete MAP key DATA\n"
" %s %s pin MAP FILE\n"
" %s %s event_pipe MAP [cpu N index M]\n"
+ " %s %s peek MAP\n"
+ " %s %s push MAP value VALUE\n"
+ " %s %s pop MAP\n"
+ " %s %s enqueue MAP value VALUE\n"
+ " %s %s dequeue MAP\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -1173,7 +1278,8 @@ static int do_help(int argc, char **argv)
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1190,6 +1296,11 @@ static const struct cmd cmds[] = {
{ "pin", do_pin },
{ "event_pipe", do_event_pipe },
{ "create", do_create },
+ { "peek", do_lookup },
+ { "push", do_update },
+ { "enqueue", do_update },
+ { "pop", do_pop_dequeue },
+ { "dequeue", do_pop_dequeue },
{ 0 }
};
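
The new peek/push/pop/enqueue/dequeue commands target the queue and stack map types, which take no key and rely on bpf_map_lookup_and_delete_elem() for the destructive read. A self-contained libbpf sketch of the same round trip (map size and value are illustrative; the include path assumes an installed libbpf, in-tree builds use "bpf.h" from tools/lib/bpf):

#include <stdio.h>
#include <bpf/bpf.h>

int queue_roundtrip(void)
{
	__u32 val = 42, out = 0;
	int fd;

	/* queue maps take no key, so key_size is 0 */
	fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(val), 16, 0);
	if (fd < 0)
		return -1;

	/* equivalent of "bpftool map push MAP value 42" */
	bpf_map_update_elem(fd, NULL, &val, 0);
	/* equivalent of "bpftool map pop MAP" */
	bpf_map_lookup_and_delete_elem(fd, NULL, &out);

	printf("popped %u\n", out);
	return 0;
}
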
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index db0e7de49d49..67e99c56bc88 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -3,6 +3,7 @@
#define _GNU_SOURCE
#include <errno.h>
+#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -12,6 +13,8 @@
#include <linux/rtnetlink.h>
#include <linux/tc_act/tc_bpf.h>
#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include <bpf.h>
#include <nlattr.h>
@@ -48,6 +51,10 @@ struct bpf_filter_t {
int ifindex;
};
+struct bpf_attach_info {
+ __u32 flow_dissector_id;
+};
+
static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
@@ -180,8 +187,45 @@ out:
return 0;
}
+static int query_flow_dissector(struct bpf_attach_info *attach_info)
+{
+ __u32 attach_flags;
+ __u32 prog_ids[1];
+ __u32 prog_cnt;
+ int err;
+ int fd;
+
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ p_err("can't open /proc/self/ns/net: %d",
+ strerror(errno));
+ return -1;
+ }
+ prog_cnt = ARRAY_SIZE(prog_ids);
+ err = bpf_prog_query(fd, BPF_FLOW_DISSECTOR, 0,
+ &attach_flags, prog_ids, &prog_cnt);
+ close(fd);
+ if (err) {
+ if (errno == EINVAL) {
+ /* Older kernels don't support querying
+ * flow dissector programs.
+ */
+ errno = 0;
+ return 0;
+ }
+ p_err("can't query prog: %s", strerror(errno));
+ return -1;
+ }
+
+ if (prog_cnt == 1)
+ attach_info->flow_dissector_id = prog_ids[0];
+
+ return 0;
+}
+
static int do_show(int argc, char **argv)
{
+ struct bpf_attach_info attach_info = {};
int i, sock, ret, filter_idx = -1;
struct bpf_netdev_t dev_array;
unsigned int nl_pid;
@@ -199,6 +243,10 @@ static int do_show(int argc, char **argv)
usage();
}
+ ret = query_flow_dissector(&attach_info);
+ if (ret)
+ return -1;
+
sock = libbpf_netlink_open(&nl_pid);
if (sock < 0) {
fprintf(stderr, "failed to open netlink sock\n");
@@ -227,6 +275,12 @@ static int do_show(int argc, char **argv)
}
NET_END_ARRAY("\n");
}
+
+ NET_START_ARRAY("flow_dissector", "%s:\n");
+ if (attach_info.flow_dissector_id > 0)
+ NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id);
+ NET_END_ARRAY("\n");
+
NET_END_OBJECT;
if (json_output)
jsonw_end_array(json_wtr);
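
query_flow_dissector() works because flow dissector programs attach to a network namespace, so an fd on /proc/self/ns/net is a valid query target for bpf_prog_query(). A trimmed standalone sketch of the same check (error handling reduced; not part of the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int show_flow_dissector(void)
{
	__u32 attach_flags = 0, prog_ids[1], prog_cnt = 1;
	int fd = open("/proc/self/ns/net", O_RDONLY);

	if (fd < 0)
		return -1;
	if (!bpf_prog_query(fd, BPF_FLOW_DISSECTOR, 0, &attach_flags,
			    prog_ids, &prog_cnt) && prog_cnt == 1)
		printf("flow_dissector id %u\n", prog_ids[0]);
	close(fd);
	return 0;
}
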
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index b54ed82b9589..7a4e21a31523 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -214,6 +214,10 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
+ if (info->run_time_ns) {
+ jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
+ jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
+ }
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
@@ -245,6 +249,9 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
if (info->nr_map_ids)
show_prog_maps(fd, info->nr_map_ids);
+ if (info->btf_id)
+ jsonw_int_field(json_wtr, "btf_id", info->btf_id);
+
if (!hash_empty(prog_table.table)) {
struct pinned_obj *obj;
@@ -277,6 +284,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
printf("%s", info->gpl_compatible ? " gpl" : "");
+ if (info->run_time_ns)
+ printf(" run_time_ns %lld run_cnt %lld",
+ info->run_time_ns, info->run_cnt);
printf("\n");
if (info->load_time) {
@@ -312,6 +322,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
}
}
+ if (info->btf_id)
+ printf("\n\tbtf_id %d", info->btf_id);
+
printf("\n");
}
@@ -394,41 +407,31 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
- unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
- void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
- unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
+ struct bpf_prog_info_linear *info_linear;
struct bpf_prog_linfo *prog_linfo = NULL;
- unsigned long *func_ksyms = NULL;
- struct bpf_prog_info info = {};
- unsigned int *func_lens = NULL;
+ enum {DUMP_JITED, DUMP_XLATED} mode;
const char *disasm_opt = NULL;
- unsigned int nr_func_ksyms;
- unsigned int nr_func_lens;
+ struct bpf_prog_info *info;
struct dump_data dd = {};
- __u32 len = sizeof(info);
+ void *func_info = NULL;
struct btf *btf = NULL;
- unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
char func_sig[1024];
unsigned char *buf;
bool linum = false;
- __u32 *member_len;
- __u64 *member_ptr;
+ __u32 member_len;
+ __u64 arrays;
ssize_t n;
- int err;
int fd;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
return -1;
-
- member_len = &info.jited_prog_len;
- member_ptr = &info.jited_prog_insns;
+ mode = DUMP_JITED;
} else if (is_prefix(*argv, "xlated")) {
- member_len = &info.xlated_prog_len;
- member_ptr = &info.xlated_prog_insns;
+ mode = DUMP_XLATED;
} else {
p_err("expected 'xlated' or 'jited', got: %s", *argv);
return -1;
@@ -467,175 +470,50 @@ static int do_dump(int argc, char **argv)
return -1;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- if (err) {
- p_err("can't get prog info: %s", strerror(errno));
- return -1;
- }
-
- if (!*member_len) {
- p_info("no instructions returned");
- close(fd);
- return 0;
- }
+ if (mode == DUMP_JITED)
+ arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
+ else
+ arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
- buf_size = *member_len;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
- buf = malloc(buf_size);
- if (!buf) {
- p_err("mem alloc failed");
- close(fd);
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ close(fd);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ p_err("can't get prog info: %s", strerror(errno));
return -1;
}
- nr_func_ksyms = info.nr_jited_ksyms;
- if (nr_func_ksyms) {
- func_ksyms = malloc(nr_func_ksyms * sizeof(__u64));
- if (!func_ksyms) {
- p_err("mem alloc failed");
- close(fd);
+ info = &info_linear->info;
+ if (mode == DUMP_JITED) {
+ if (info->jited_prog_len == 0) {
+ p_info("no instructions returned");
goto err_free;
}
- }
-
- nr_func_lens = info.nr_jited_func_lens;
- if (nr_func_lens) {
- func_lens = malloc(nr_func_lens * sizeof(__u32));
- if (!func_lens) {
- p_err("mem alloc failed");
- close(fd);
+ buf = (unsigned char *)(info->jited_prog_insns);
+ member_len = info->jited_prog_len;
+ } else { /* DUMP_XLATED */
+ if (info->xlated_prog_len == 0) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
goto err_free;
}
+ buf = (unsigned char *)info->xlated_prog_insns;
+ member_len = info->xlated_prog_len;
}
- nr_finfo = info.nr_func_info;
- finfo_rec_size = info.func_info_rec_size;
- if (nr_finfo && finfo_rec_size) {
- func_info = malloc(nr_finfo * finfo_rec_size);
- if (!func_info) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- linfo_rec_size = info.line_info_rec_size;
- if (info.nr_line_info && linfo_rec_size && info.btf_id) {
- nr_linfo = info.nr_line_info;
- linfo = malloc(nr_linfo * linfo_rec_size);
- if (!linfo) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- jited_linfo_rec_size = info.jited_line_info_rec_size;
- if (info.nr_jited_line_info &&
- jited_linfo_rec_size &&
- info.nr_jited_ksyms &&
- info.nr_jited_func_lens &&
- info.btf_id) {
- nr_jited_linfo = info.nr_jited_line_info;
- jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
- if (!jited_linfo) {
- p_err("mem alloc failed");
- close(fd);
- goto err_free;
- }
- }
-
- memset(&info, 0, sizeof(info));
-
- *member_ptr = ptr_to_u64(buf);
- *member_len = buf_size;
- info.jited_ksyms = ptr_to_u64(func_ksyms);
- info.nr_jited_ksyms = nr_func_ksyms;
- info.jited_func_lens = ptr_to_u64(func_lens);
- info.nr_jited_func_lens = nr_func_lens;
- info.nr_func_info = nr_finfo;
- info.func_info_rec_size = finfo_rec_size;
- info.func_info = ptr_to_u64(func_info);
- info.nr_line_info = nr_linfo;
- info.line_info_rec_size = linfo_rec_size;
- info.line_info = ptr_to_u64(linfo);
- info.nr_jited_line_info = nr_jited_linfo;
- info.jited_line_info_rec_size = jited_linfo_rec_size;
- info.jited_line_info = ptr_to_u64(jited_linfo);
-
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- close(fd);
- if (err) {
- p_err("can't get prog info: %s", strerror(errno));
- goto err_free;
- }
-
- if (*member_len > buf_size) {
- p_err("too many instructions returned");
- goto err_free;
- }
-
- if (info.nr_jited_ksyms > nr_func_ksyms) {
- p_err("too many addresses returned");
- goto err_free;
- }
-
- if (info.nr_jited_func_lens > nr_func_lens) {
- p_err("too many values returned");
- goto err_free;
- }
-
- if (info.nr_func_info != nr_finfo) {
- p_err("incorrect nr_func_info %d vs. expected %d",
- info.nr_func_info, nr_finfo);
- goto err_free;
- }
-
- if (info.func_info_rec_size != finfo_rec_size) {
- p_err("incorrect func_info_rec_size %d vs. expected %d",
- info.func_info_rec_size, finfo_rec_size);
- goto err_free;
- }
-
- if (linfo && info.nr_line_info != nr_linfo) {
- p_err("incorrect nr_line_info %u vs. expected %u",
- info.nr_line_info, nr_linfo);
- goto err_free;
- }
-
- if (info.line_info_rec_size != linfo_rec_size) {
- p_err("incorrect line_info_rec_size %u vs. expected %u",
- info.line_info_rec_size, linfo_rec_size);
- goto err_free;
- }
-
- if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
- p_err("incorrect nr_jited_line_info %u vs. expected %u",
- info.nr_jited_line_info, nr_jited_linfo);
- goto err_free;
- }
-
- if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
- p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
- info.jited_line_info_rec_size, jited_linfo_rec_size);
- goto err_free;
- }
-
- if ((member_len == &info.jited_prog_len &&
- info.jited_prog_insns == 0) ||
- (member_len == &info.xlated_prog_len &&
- info.xlated_prog_insns == 0)) {
- p_err("error retrieving insn dump: kernel.kptr_restrict set?");
- goto err_free;
- }
-
- if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
+ if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
p_err("failed to get btf");
goto err_free;
}
- if (nr_linfo) {
- prog_linfo = bpf_prog_linfo__new(&info);
+ func_info = (void *)info->func_info;
+
+ if (info->nr_line_info) {
+ prog_linfo = bpf_prog_linfo__new(info);
if (!prog_linfo)
p_info("error in processing bpf_line_info. continue without it.");
}
@@ -648,9 +526,9 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
- n = write(fd, buf, *member_len);
+ n = write(fd, buf, member_len);
close(fd);
- if (n != *member_len) {
+ if (n != member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
goto err_free;
@@ -658,19 +536,19 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_null(json_wtr);
- } else if (member_len == &info.jited_prog_len) {
+ } else if (mode == DUMP_JITED) {
const char *name = NULL;
- if (info.ifindex) {
- name = ifindex_to_bfd_params(info.ifindex,
- info.netns_dev,
- info.netns_ino,
+ if (info->ifindex) {
+ name = ifindex_to_bfd_params(info->ifindex,
+ info->netns_dev,
+ info->netns_ino,
&disasm_opt);
if (!name)
goto err_free;
}
- if (info.nr_jited_func_lens && info.jited_func_lens) {
+ if (info->nr_jited_func_lens && info->jited_func_lens) {
struct kernel_sym *sym = NULL;
struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
@@ -678,17 +556,16 @@ static int do_dump(int argc, char **argv)
__u64 *ksyms = NULL;
__u32 *lens;
__u32 i;
-
- if (info.nr_jited_ksyms) {
+ if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
- ksyms = (__u64 *) info.jited_ksyms;
+ ksyms = (__u64 *) info->jited_ksyms;
}
if (json_output)
jsonw_start_array(json_wtr);
- lens = (__u32 *) info.jited_func_lens;
- for (i = 0; i < info.nr_jited_func_lens; i++) {
+ lens = (__u32 *) info->jited_func_lens;
+ for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
if (sym)
@@ -700,7 +577,7 @@ static int do_dump(int argc, char **argv)
}
if (func_info) {
- record = func_info + i * finfo_rec_size;
+ record = func_info + i * info->func_info_rec_size;
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
@@ -737,49 +614,37 @@ static int do_dump(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
} else {
- disasm_print_insn(buf, *member_len, opcodes, name,
+ disasm_print_insn(buf, member_len, opcodes, name,
disasm_opt, btf, NULL, 0, 0, false);
}
} else if (visual) {
if (json_output)
jsonw_null(json_wtr);
else
- dump_xlated_cfg(buf, *member_len);
+ dump_xlated_cfg(buf, member_len);
} else {
kernel_syms_load(&dd);
- dd.nr_jited_ksyms = info.nr_jited_ksyms;
- dd.jited_ksyms = (__u64 *) info.jited_ksyms;
+ dd.nr_jited_ksyms = info->nr_jited_ksyms;
+ dd.jited_ksyms = (__u64 *) info->jited_ksyms;
dd.btf = btf;
dd.func_info = func_info;
- dd.finfo_rec_size = finfo_rec_size;
+ dd.finfo_rec_size = info->func_info_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
- dump_xlated_json(&dd, buf, *member_len, opcodes,
+ dump_xlated_json(&dd, buf, member_len, opcodes,
linum);
else
- dump_xlated_plain(&dd, buf, *member_len, opcodes,
+ dump_xlated_plain(&dd, buf, member_len, opcodes,
linum);
kernel_syms_destroy(&dd);
}
- free(buf);
- free(func_ksyms);
- free(func_lens);
- free(func_info);
- free(linfo);
- free(jited_linfo);
- bpf_prog_linfo__free(prog_linfo);
+ free(info_linear);
return 0;
err_free:
- free(buf);
- free(func_ksyms);
- free(func_lens);
- free(func_info);
- free(linfo);
- free(jited_linfo);
- bpf_prog_linfo__free(prog_linfo);
+ free(info_linear);
return -1;
}
@@ -931,10 +796,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(type, &attr.prog_type,
&expected_attach_type);
free(type);
- if (err < 0) {
- p_err("unknown program type '%s'", *argv);
+ if (err < 0)
goto err_free_reuse_maps;
- }
+
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
void *new_map_replace;
@@ -1015,6 +879,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
}
+ set_max_rlimit();
+
obj = __bpf_object__open_xattr(&attr, bpf_flags);
if (IS_ERR_OR_NULL(obj)) {
p_err("failed to open object file");
@@ -1029,11 +895,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = libbpf_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
- if (err < 0) {
- p_err("failed to guess program type based on section name %s\n",
- sec_name);
+ if (err < 0)
goto err_close_obj;
- }
}
bpf_program__set_ifindex(pos, ifindex);
@@ -1050,7 +913,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
j = 0;
while (j < old_map_fds && map_replace[j].name) {
i = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
map_replace[j].idx = i;
break;
@@ -1071,7 +934,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
/* Set ifindex and name reuse */
j = 0;
idx = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
bpf_map__set_ifindex(map, ifindex);
@@ -1097,8 +960,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- set_max_rlimit();
-
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
@@ -1199,11 +1060,12 @@ static int do_help(int argc, char **argv)
" tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
" cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
" lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
- " sk_reuseport | flow_dissector |\n"
+ " sk_reuseport | flow_dissector | cgroup/sysctl |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
- " cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
- " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse |\n"
+ " cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
+ " cgroup/recvmsg6 }\n"
+ " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
" flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 7073dbe1ff27..0bb17bf88b18 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -195,6 +195,9 @@ static const char *print_imm(void *private_data,
if (insn->src_reg == BPF_PSEUDO_MAP_FD)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u]", insn->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 5467c6bf9ceb..3b24231c58a2 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
feature_dir := $(srctree)/tools/build/feature
ifneq ($(OUTPUT),)
@@ -53,10 +54,6 @@ FEATURE_TESTS_BASIC := \
libslang \
libcrypto \
libunwind \
- libunwind-x86 \
- libunwind-x86_64 \
- libunwind-arm \
- libunwind-aarch64 \
pthread-attr-setaffinity-np \
pthread-barrier \
reallocarray \
@@ -70,8 +67,9 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
- libopencsd \
- libaio
+ libaio \
+ libzstd \
+ disassembler-four-args
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
@@ -84,6 +82,11 @@ FEATURE_TESTS_EXTRA := \
libbabeltrace \
libbfd-liberty \
libbfd-liberty-z \
+ libopencsd \
+ libunwind-x86 \
+ libunwind-x86_64 \
+ libunwind-arm \
+ libunwind-aarch64 \
libunwind-debug-frame \
libunwind-debug-frame-arm \
libunwind-debug-frame-aarch64 \
@@ -118,7 +121,9 @@ FEATURE_DISPLAY ?= \
lzma \
get_cpuid \
bpf \
- libaio
+ libaio \
+ libzstd \
+ disassembler-four-args
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/Makefile.include b/tools/build/Makefile.include
index d360f39a445b..8dadaa0fbb43 100644
--- a/tools/build/Makefile.include
+++ b/tools/build/Makefile.include
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
fixdep:
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 7ceb4441b627..4b8244ee65ce 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -62,7 +62,8 @@ FILES= \
test-clang.bin \
test-llvm.bin \
test-llvm-version.bin \
- test-libaio.bin
+ test-libaio.bin \
+ test-libzstd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -301,6 +302,9 @@ $(OUTPUT)test-clang.bin:
$(OUTPUT)test-libaio.bin:
$(BUILD) -lrt
+$(OUTPUT)test-libzstd.bin:
+ $(BUILD) -lzstd
+
###############################
clean:
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 20cdaa4fc112..a59c53705093 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -170,14 +170,22 @@
# include "test-setns.c"
#undef main
-#define main main_test_libopencsd
-# include "test-libopencsd.c"
-#undef main
-
#define main main_test_libaio
# include "test-libaio.c"
#undef main
+#define main main_test_reallocarray
+# include "test-reallocarray.c"
+#undef main
+
+#define main main_test_disassembler_four_args
+# include "test-disassembler-four-args.c"
+#undef main
+
+#define main main_test_zstd
+# include "test-libzstd.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -217,8 +225,10 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
- main_test_libopencsd();
main_test_libaio();
+ main_test_reallocarray();
+ main_test_disassembler_four_args();
+ main_test_libzstd();
return 0;
}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
index 573000f93212..c3c201691b4f 100644
--- a/tools/build/feature/test-get_current_dir_name.c
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -8,3 +8,4 @@ int main(void)
free(get_current_dir_name());
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index d68eb4fb40cc..2b0e02c38870 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
/*
* Check OpenCSD library version is sufficient to provide required features
*/
-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.10.0 is required"
+#error "OpenCSD >= 0.11.0 is required"
#endif
int main(void)
diff --git a/tools/build/feature/test-libpython.c b/tools/build/feature/test-libpython.c
index 0c1641b0d9a7..371c9113e49d 100644
--- a/tools/build/feature/test-libpython.c
+++ b/tools/build/feature/test-libpython.c
@@ -7,3 +7,4 @@ int main(void)
return 0;
}
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-libzstd.c b/tools/build/feature/test-libzstd.c
new file mode 100644
index 000000000000..55268c01b84d
--- /dev/null
+++ b/tools/build/feature/test-libzstd.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <zstd.h>
+
+int main(void)
+{
+ ZSTD_CStream *cstream;
+
+ cstream = ZSTD_createCStream();
+ ZSTD_freeCStream(cstream);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
index 8170de35150d..8f6743e31da7 100644
--- a/tools/build/feature/test-reallocarray.c
+++ b/tools/build/feature/test-reallocarray.c
@@ -6,3 +6,5 @@ int main(void)
{
return !!reallocarray(NULL, 1, 1);
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-sched_getcpu.c b/tools/build/feature/test-sched_getcpu.c
index e448deb4124c..48995ac7911e 100644
--- a/tools/build/feature/test-sched_getcpu.c
+++ b/tools/build/feature/test-sched_getcpu.c
@@ -8,3 +8,5 @@ int main(void)
{
return sched_getcpu();
}
+
+#undef _GNU_SOURCE
diff --git a/tools/build/feature/test-setns.c b/tools/build/feature/test-setns.c
index 1f714d2a658b..4a1581ae7a55 100644
--- a/tools/build/feature/test-setns.c
+++ b/tools/build/feature/test-setns.c
@@ -5,3 +5,4 @@ int main(void)
{
return setns(0, 0);
}
+#undef _GNU_SOURCE
diff --git a/tools/debugging/Makefile b/tools/debugging/Makefile
new file mode 100644
index 000000000000..e2b7c1a6fb8f
--- /dev/null
+++ b/tools/debugging/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for debugging tools
+
+PREFIX ?= /usr
+BINDIR ?= bin
+INSTALL ?= install
+
+TARGET = kernel-chktaint
+
+all: $(TARGET)
+
+clean:
+
+install: kernel-chktaint
+ $(INSTALL) -D -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/$(BINDIR)/$(TARGET)
+
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
new file mode 100755
index 000000000000..2240cb56e6e5
--- /dev/null
+++ b/tools/debugging/kernel-chktaint
@@ -0,0 +1,202 @@
+#! /bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Randy Dunlap <rdunlap@infradead.org>, 2018
+# Thorsten Leemhuis <linux@leemhuis.info>, 2018
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/}
+ ${0##*/} <int>
+
+Call without parameters to decode /proc/sys/kernel/tainted.
+
+Call with a positive integer as a parameter to decode a value you
+retrieved from /proc/sys/kernel/tainted on another system.
+
+EOF
+}
+
+if [ "$1"x != "x" ]; then
+ if [ "$1"x == "--helpx" ] || [ "$1"x == "-hx" ] ; then
+ usage
+ exit 1
+ elif [ $1 -ge 0 ] 2>/dev/null ; then
+ taint=$1
+ else
+ echo "Error: Parameter '$1' not a positive interger. Aborting." >&2
+ exit 1
+ fi
+else
+ TAINTFILE="/proc/sys/kernel/tainted"
+ if [ ! -r $TAINTFILE ]; then
+ echo "No file: $TAINTFILE"
+ exit
+ fi
+
+ taint=`cat $TAINTFILE`
+fi
+
+if [ $taint -eq 0 ]; then
+ echo "Kernel not Tainted"
+ exit
+else
+ echo "Kernel is \"tainted\" for the following reasons:"
+fi
+
+T=$taint
+out=
+
+addout() {
+ out=$out$1
+}
+
+if [ `expr $T % 2` -eq 0 ]; then
+ addout "G"
+else
+ addout "P"
+ echo " * proprietary module was loaded (#0)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "F"
+ echo " * module was force loaded (#1)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "S"
+ echo " * SMP kernel oops on an officially SMP incapable processor (#2)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "R"
+ echo " * module was force unloaded (#3)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "M"
+ echo " * processor reported a Machine Check Exception (MCE) (#4)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "B"
+ echo " * bad page referenced or some unexpected page flags (#5)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "U"
+ echo " * taint requested by userspace application (#6)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "D"
+ echo " * kernel died recently, i.e. there was an OOPS or BUG (#7)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "A"
+ echo " * an ACPI table was overridden by user (#8)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "W"
+ echo " * kernel issued warning (#9)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "C"
+ echo " * staging driver was loaded (#10)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "I"
+ echo " * workaround for bug in platform firmware applied (#11)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "O"
+ echo " * externally-built ('out-of-tree') module was loaded (#12)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "E"
+ echo " * unsigned module was loaded (#13)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "L"
+ echo " * soft lockup occurred (#14)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "K"
+ echo " * kernel has been live patched (#15)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "X"
+ echo " * auxiliary taint, defined for and used by distros (#16)"
+fi
+
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "T"
+ echo " * kernel was built with the struct randomization plugin (#17)"
+fi
+
+echo "For a more detailed explanation of the various taint flags see"
+echo " Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel sources"
+echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
+echo "Raw taint value as int/string: $taint/'$out'"
+#EOF#
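
As a worked example, "kernel-chktaint 4097" decodes to a proprietary module (#0) and an out-of-tree module (#12), since 4097 = 2^12 + 2^0; the raw taint string printed at the end would contain 'P' and 'O'.
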
diff --git a/tools/firewire/nosy-dump.c b/tools/firewire/nosy-dump.c
index 3179c711bd65..156e0356e814 100644
--- a/tools/firewire/nosy-dump.c
+++ b/tools/firewire/nosy-dump.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nosy-dump - Interface to snoop mode driver for TI PCILynx 1394 controllers
* Copyright (C) 2002-2006 Kristian Høgsberg
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <byteswap.h>
diff --git a/tools/firmware/ihex2fw.c b/tools/firmware/ihex2fw.c
index b58dd061e978..2ebed47680b1 100644
--- a/tools/firmware/ihex2fw.c
+++ b/tools/firmware/ihex2fw.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Parser/loader for IHEX formatted data.
*
* Copyright © 2008 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2005 Jan Harkes <jaharkes@cs.cmu.edu>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <stdint.h>
@@ -24,6 +21,10 @@
#include <getopt.h>
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+
struct ihex_binrec {
struct ihex_binrec *next; /* not part of the real data structure */
uint32_t addr;
@@ -131,6 +132,7 @@ int main(int argc, char **argv)
static int process_ihex(uint8_t *data, ssize_t size)
{
struct ihex_binrec *record;
+ size_t record_size;
uint32_t offset = 0;
uint32_t data32;
uint8_t type, crc = 0, crcbyte = 0;
@@ -157,12 +159,13 @@ next_record:
len <<= 8;
len += hex(data + i, &crc); i += 2;
}
- record = malloc((sizeof (*record) + len + 3) & ~3);
+ record_size = ALIGN(sizeof(*record) + len, 4);
+ record = malloc(record_size);
if (!record) {
fprintf(stderr, "out of memory for records\n");
return -ENOMEM;
}
- memset(record, 0, (sizeof(*record) + len + 3) & ~3);
+ memset(record, 0, record_size);
record->len = len;
/* now check if we have enough data to read everything */
@@ -259,13 +262,18 @@ static void file_record(struct ihex_binrec *record)
*p = record;
}
+static uint16_t ihex_binrec_size(struct ihex_binrec *p)
+{
+ return p->len + sizeof(p->addr) + sizeof(p->len);
+}
+
static int output_records(int outfd)
{
unsigned char zeroes[6] = {0, 0, 0, 0, 0, 0};
struct ihex_binrec *p = records;
while (p) {
- uint16_t writelen = (p->len + 9) & ~3;
+ uint16_t writelen = ALIGN(ihex_binrec_size(p), 4);
p->addr = htonl(p->addr);
p->len = htons(p->len);
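
ALIGN() rounds its argument up to the next multiple of a power-of-two alignment, which keeps each record allocation and the bytes written per record padded to 4 bytes. A tiny standalone check of the arithmetic (not part of the patch):

#include <assert.h>

#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))

int main(void)
{
	assert(ALIGN(13U, 4) == 16);	/* 13 rounds up to the next multiple of 4 */
	assert(ALIGN(16U, 4) == 16);	/* already-aligned values are unchanged */
	return 0;
}
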
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index c864544efe05..30ed0e06f52a 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* gpio-event-mon - monitor GPIO line events from userspace
*
* Copyright (C) 2016 Linus Walleij
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* Usage:
* gpio-event-mon -n <device-name> -o <offset>
*/
diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
index 4bcb234c0fca..0e0060a6eb34 100644
--- a/tools/gpio/gpio-hammer.c
+++ b/tools/gpio/gpio-hammer.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* gpio-hammer - example swiss army knife to shake GPIO lines on a system
*
* Copyright (C) 2016 Linus Walleij
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* Usage:
* gpio-hammer -n <device-name> -o <offset1> -o <offset2>
*/
diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c
index cf7e2f3419ee..53470de6a502 100644
--- a/tools/gpio/gpio-utils.c
+++ b/tools/gpio/gpio-utils.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO tools - helpers library for the GPIO tools
*
* Copyright (C) 2015 Linus Walleij
* Copyright (C) 2016 Bamvor Jian Zhang
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <unistd.h>
diff --git a/tools/gpio/gpio-utils.h b/tools/gpio/gpio-utils.h
index 344ea041f8d4..cf37f13f3dcb 100644
--- a/tools/gpio/gpio-utils.h
+++ b/tools/gpio/gpio-utils.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* GPIO tools - utility helpers library for the GPIO tools
*
@@ -7,9 +8,6 @@
* Copyright (c) 2010 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
* Copyright (c) 2008 Jonathan Cameron
* *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#ifndef _GPIO_UTILS_H_
#define _GPIO_UTILS_H_
diff --git a/tools/gpio/lsgpio.c b/tools/gpio/lsgpio.c
index eb3f56efd215..e1430f504c13 100644
--- a/tools/gpio/lsgpio.c
+++ b/tools/gpio/lsgpio.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lsgpio - example on how to list the GPIO lines on a system
*
* Copyright (C) 2015 Linus Walleij
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* Usage:
* lsgpio <-n device-name>
*/
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index 8ff8cb1a11f4..aea2d91ab364 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* An implementation of host to guest copy functionality for Linux.
*
* Copyright (C) 2014, Microsoft, Inc.
*
* Author : K. Y. Srinivasan <kys@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
*/
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index b13300172762..efe1e34dd91b 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* An implementation of the host initiated guest snapshot for Hyper-V.
*
- *
* Copyright (C) 2013, Microsoft, Inc.
* Author : K. Y. Srinivasan <kys@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
*/
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index ac2de6b7e89f..f115d166c985 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Industrialio event test code.
*
* Copyright (c) 2011-2012 Lars-Peter Clausen <lars@metafoo.de>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* This program is primarily intended as an example application.
* Reads the current buffer setup from sysfs and starts a short capture
* from the specified device, pretty printing the result after appropriate
@@ -60,6 +57,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_GRAVITY] = "gravity",
[IIO_POSITIONRELATIVE] = "positionrelative",
[IIO_PHASE] = "phase",
+ [IIO_MASSCONCENTRATION] = "massconcentration",
};
static const char * const iio_ev_type_text[] = {
@@ -114,7 +112,13 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_I] = "i",
[IIO_MOD_Q] = "q",
[IIO_MOD_CO2] = "co2",
+ [IIO_MOD_ETHANOL] = "ethanol",
+ [IIO_MOD_H2] = "h2",
[IIO_MOD_VOC] = "voc",
+ [IIO_MOD_PM1] = "pm1",
+ [IIO_MOD_PM2P5] = "pm2p5",
+ [IIO_MOD_PM4] = "pm4",
+ [IIO_MOD_PM10] = "pm10",
};
static bool event_is_known(struct iio_event_data *event)
@@ -156,6 +160,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_GRAVITY:
case IIO_POSITIONRELATIVE:
case IIO_PHASE:
+ case IIO_MASSCONCENTRATION:
break;
default:
return false;
@@ -199,7 +204,13 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_I:
case IIO_MOD_Q:
case IIO_MOD_CO2:
+ case IIO_MOD_ETHANOL:
+ case IIO_MOD_H2:
case IIO_MOD_VOC:
+ case IIO_MOD_PM1:
+ case IIO_MOD_PM2P5:
+ case IIO_MOD_PM4:
+ case IIO_MOD_PM10:
break;
default:
return false;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 84545666a09c..34d63bcebcd2 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Industrialio buffer test code.
*
* Copyright (c) 2008 Jonathan Cameron
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
* This program is primarily intended as an example application.
* Reads the current buffer setup from sysfs and starts a short capture
* from the specified device, pretty printing the result after appropriate
@@ -15,7 +12,6 @@
* generic_buffer -n <device_name> -t <trigger_name>
* If trigger name is not specified the program assumes you want a dataready
* trigger associated with the device and goes looking for it.
- *
*/
#include <unistd.h>
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7a6d61c6c012..a22b6e8fad46 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* IIO - useful set of util functionality
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <string.h>
#include <stdlib.h>
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index 8b379da26e35..74bde4fde2c8 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _IIO_UTILS_H_
#define _IIO_UTILS_H_
/* IIO - useful set of util functionality
*
* Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <stdint.h>
diff --git a/tools/iio/lsiio.c b/tools/iio/lsiio.c
index ab0f5cf16025..2cf56fb2449b 100644
--- a/tools/iio/lsiio.c
+++ b/tools/iio/lsiio.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Industrial I/O utilities - lsiio.c
*
* Copyright (c) 2010 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <string.h>
diff --git a/tools/include/asm-generic/barrier.h b/tools/include/asm-generic/barrier.h
index 52278d880a61..6ef36e920ea8 100644
--- a/tools/include/asm-generic/barrier.h
+++ b/tools/include/asm-generic/barrier.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copied from the kernel sources to tools/perf/:
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
*/
#ifndef __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
#define __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index a1a959ba24ff..b0e35eec6499 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -12,11 +12,13 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
#define ETM_OPT_TS 28
#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_CTXTID 6
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
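The new CTXTID definitions follow the existing pattern: a perf attr.config option bit (ETM_OPT_*) that a driver translates into the matching ETMv4 CONFIGR bit (ETM4_CFG_BIT_*). A hedged sketch of such a translation, not the actual driver code:

    /* Sketch only: map perf config option bits to ETMv4 CONFIGR bits. */
    static unsigned int etm4_configr_from_opts(unsigned long perf_config)
    {
            unsigned int configr = 0;

            if (perf_config & (1UL << ETM_OPT_CYCACC))
                    configr |= 1U << ETM4_CFG_BIT_CYCACC;
            if (perf_config & (1UL << ETM_OPT_CTXTID))
                    configr |= 1U << ETM4_CFG_BIT_CTXTID;
            if (perf_config & (1UL << ETM_OPT_TS))
                    configr |= 1U << ETM4_CFG_BIT_TS;
            if (perf_config & (1UL << ETM_OPT_RETSTK))
                    configr |= 1U << ETM4_CFG_BIT_RETSTK;

            return configr;
    }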
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index af55acf73e75..ca28b6ab8db7 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -199,6 +199,16 @@
.off = OFF, \
.imm = 0 })
+/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
@@ -209,6 +219,16 @@
.off = OFF, \
.imm = IMM })
+/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Unconditional jumps, goto pc + off16 */
#define BPF_JMP_A(OFF) \
@@ -258,10 +278,29 @@
.off = 0, \
.imm = ((__u64) (IMM)) >> 32 })
+#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_DW | BPF_IMM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF1, \
+ .imm = IMM1 }), \
+ ((struct bpf_insn) { \
+ .code = 0, /* zero is reserved opcode */ \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = OFF2, \
+ .imm = IMM2 })
+
/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
- BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0, \
+ MAP_FD, 0)
+
+#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF) \
+ BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0, \
+ MAP_FD, VALUE_OFF)
/* Relative call */
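As a rough illustration of the new helpers, the sketch below builds a few instructions by hand; map_fd is a placeholder for an already created map whose value can be addressed directly (for example a global-data style array map), and the register choices are arbitrary:

    #include <string.h>
    #include <linux/bpf.h>
    #include <linux/filter.h>       /* tools copy providing the insn macros */

    static size_t build_insns(int map_fd, struct bpf_insn *out)
    {
            const struct bpf_insn insns[] = {
                    BPF_MOV64_IMM(BPF_REG_0, 0),
                    /* r2 = &map_value(map_fd) + 16 (expands to two instructions) */
                    BPF_LD_MAP_VALUE(BPF_REG_2, map_fd, 16),
                    /* if ((u32)r1 > 0x7f) skip the store below */
                    BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 0x7f, 1),
                    /* *(u64 *)(r2 + 0) = 42 */
                    BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 42),
                    BPF_EXIT_INSN(),
            };

            memcpy(out, insns, sizeof(insns));
            return sizeof(insns) / sizeof(insns[0]);
    }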
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 0325cefc2220..e20a67d538b8 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Integer base 2 logarithm calculation
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _TOOLS_LINUX_LOG2_H
diff --git a/tools/include/linux/numa.h b/tools/include/linux/numa.h
new file mode 100644
index 000000000000..110b0e5d0fb0
--- /dev/null
+++ b/tools/include/linux/numa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NUMA_H
+#define _LINUX_NUMA_H
+
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
+
+#endif /* _LINUX_NUMA_H */
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 9fdcd3eaac3b..d29725769107 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -87,9 +87,6 @@
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
-/********** lib/flex_array.c **********/
-#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
-
/********** security/ **********/
#define KEY_DESTROY 0xbd
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index 112582253dd0..d83763a5327c 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -1,20 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree.h
@@ -43,13 +31,28 @@ struct rb_root {
struct rb_node *rb_node;
};
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
@@ -68,6 +71,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+extern void rb_insert_color_cached(struct rb_node *,
+ struct rb_root_cached *, bool);
+extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -75,6 +84,8 @@ extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
+extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
@@ -90,12 +101,29 @@ static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
-
-/*
- * Handy for checking that we are not deleting an entry that is
- * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
- * probably should be moved to lib/rbtree.c...
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
+ * given type allowing the backing memory of @pos to be invalidated
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ *
+ * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
+ * list_for_each_entry_safe() and allows the iteration to continue independent
+ * of changes to @pos by the body of the loop.
+ *
+ * Note, however, that it cannot handle other modifications that re-order the
+ * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
+ * rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
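A minimal sketch of the leftmost-cached API mirrored above (the node type and key are hypothetical): the insert path passes a "leftmost" hint gathered during the descent, and rb_first_cached() then returns the smallest node in O(1).

    struct mynode {
            struct rb_node rb;
            unsigned long key;
    };

    static struct rb_root_cached my_tree = RB_ROOT_CACHED;

    static void my_insert(struct mynode *new)
    {
            struct rb_node **link = &my_tree.rb_root.rb_node, *parent = NULL;
            bool leftmost = true;

            while (*link) {
                    struct mynode *cur = rb_entry(*link, struct mynode, rb);

                    parent = *link;
                    if (new->key < cur->key) {
                            link = &(*link)->rb_left;
                    } else {
                            link = &(*link)->rb_right;
                            leftmost = false;
                    }
            }

            rb_link_node(&new->rb, parent, link);
            /* rebalances and, when needed, updates my_tree.rb_leftmost */
            rb_insert_color_cached(&new->rb, &my_tree, leftmost);
    }

    static struct mynode *my_first(void)    /* O(1) access to the smallest key */
    {
            struct rb_node *n = rb_first_cached(&my_tree);

            return n ? rb_entry(n, struct mynode, rb) : NULL;
    }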
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index 43be941db695..ddd01006ece5 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
tools/linux/include/linux/rbtree_augmented.h
@@ -44,7 +32,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+extern void __rb_insert_augmented(struct rb_node *node,
+ struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
@@ -60,7 +50,16 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, augment->rotate);
+ __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+}
+
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool newleft,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, &root->rb_root,
+ newleft, &root->rb_leftmost, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -93,7 +92,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+ .propagate = rbname ## _propagate, \
+ .copy = rbname ## _copy, \
+ .rotate = rbname ## _rotate \
};
@@ -126,11 +127,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
{
if (parent) {
if (parent->rb_left == old)
- parent->rb_left = new;
+ WRITE_ONCE(parent->rb_left, new);
else
- parent->rb_right = new;
+ WRITE_ONCE(parent->rb_right, new);
} else
- root->rb_node = new;
+ WRITE_ONCE(root->rb_node, new);
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
@@ -138,12 +139,17 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *child = node->rb_right;
+ struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
+ if (leftmost && node == *leftmost)
+ *leftmost = rb_next(node);
+
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -170,6 +176,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
+
tmp = child->rb_left;
if (!tmp) {
/*
@@ -183,6 +190,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
*/
parent = successor;
child2 = successor->rb_right;
+
augment->copy(node, successor);
} else {
/*
@@ -204,19 +212,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
- parent->rb_left = child2 = successor->rb_right;
- successor->rb_right = child;
+ child2 = successor->rb_right;
+ WRITE_ONCE(parent->rb_left, child2);
+ WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
+
augment->copy(node, successor);
augment->propagate(parent, successor);
}
- successor->rb_left = tmp = node->rb_left;
+ tmp = node->rb_left;
+ WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
+
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
@@ -237,9 +249,21 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root,
+ NULL, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
-#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost,
+ augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+}
+
+#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
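A minimal usage sketch for the cached augmented helpers; the node type and the callback set my_cb are hypothetical, with my_cb assumed to be generated elsewhere via RB_DECLARE_CALLBACKS():

    struct augnode {
            struct rb_node rb;
            unsigned long key;
            unsigned long subtree_max;      /* augmented data maintained via my_cb */
    };

    extern const struct rb_augment_callbacks my_cb;  /* from RB_DECLARE_CALLBACKS() */

    static struct rb_root_cached aug_tree = RB_ROOT_CACHED;

    static void aug_remove(struct augnode *n)
    {
            /* also updates aug_tree.rb_leftmost if the smallest node goes away */
            rb_erase_augmented_cached(&n->rb, &aug_tree, &my_cb);
    }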
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/include/nolibc/nolibc.h
index f98f5b92d3eb..2551e9b71167 100644
--- a/tools/testing/selftests/rcutorture/bin/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -3,7 +3,85 @@
* Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu>
*/
-/* some archs (at least aarch64) don't expose the regular syscalls anymore by
+/*
+ * This file is designed to be used as a libc alternative for minimal programs
+ * with very limited requirements. It consists of a small number of syscall and
+ * type definitions, and the minimal startup code needed to call main().
+ * All syscalls are declared as static functions so that they can be optimized
+ * away by the compiler when not used.
+ *
+ * Syscalls are split into 3 levels:
+ * - The lower level is the arch-specific syscall() definition, consisting in
+ * assembly code in compound expressions. These are called my_syscall0() to
+ * my_syscall6() depending on the number of arguments. The MIPS
+ * implementation is limited to 5 arguments. All input arguments are cast
+ * to a long stored in a register. These expressions always return the
+ * syscall's return value as a signed long value which is often either a
+ * pointer or the negated errno value.
+ *
+ * - The second level is mostly architecture-independent. It is made of
+ * static functions called sys_<name>() which rely on my_syscallN()
+ * depending on the syscall definition. These functions are responsible
+ * for exposing the appropriate types for the syscall arguments (int,
+ * pointers, etc) and for setting the appropriate return type (often int).
+ * A few of them are architecture-specific because the syscalls are not all
+ * mapped exactly the same among architectures. For example, some archs do
+ * not implement select() and need pselect6() instead, so the sys_select()
+ * function will have to abstract this.
+ *
+ * - The third level is the libc call definition. It exposes the lower raw
+ * sys_<name>() calls in a way that looks like what a libc usually does,
+ * takes care of specific input values, and of setting errno upon error.
+ * There can be minor variations compared to standard libc calls. For
+ * example the open() call always takes 3 args here.
+ *
+ * The errno variable is declared static and unused. This way it can be
+ * optimized away if not used. However this means that a program made of
+ * multiple C files may observe different errno values (one per C file). For
+ * the type of programs this project targets it usually is not a problem. The
+ * resulting program may even be reduced by defining the NOLIBC_IGNORE_ERRNO
+ * macro, in which case the errno value will never be assigned.
+ *
+ * Some stdint-like integer types are defined. These are valid on all currently
+ * supported architectures, because signs are enforced, ints are assumed to be
+ * 32 bits, longs the size of a pointer and long long 64 bits. If more
+ * architectures have to be supported, this may need to be adapted.
+ *
+ * Some macro definitions like the O_* values passed to open(), and some
+ * structures like the sys_stat struct depend on the architecture.
+ *
+ * The definitions start with the architecture-specific parts, which are picked
+ * based on what the compiler knows about the target architecture, and are
+ * completed with the generic code. Since it is the compiler which sets the
+ * target architecture, cross-compiling normally works out of the box without
+ * having to specify anything.
+ *
+ * Finally some very common libc-level functions are provided. It is the case
+ * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing
+ * is currently provided regarding stdio emulation.
+ *
+ * The macro NOLIBC is always defined, so that it is possible for a program to
+ * check this macro to know if it is being built against and decide to disable
+ * some features or simply not to include some standard libc files.
+ *
+ * Ideally this file should be split in multiple files for easier long term
+ * maintenance, but provided as a single file as it is now, it's quite
+ * convenient to use. Maybe some variations involving a set of includes at the
+ * top could work.
+ *
+ * A simple static executable may be built this way :
+ * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ * -static -include nolibc.h -lgcc -o hello hello.c
+ *
+ * A very useful calling convention table may be found here :
+ * http://man7.org/linux/man-pages/man2/syscall.2.html
+ *
+ * This doc is quite convenient though not necessarily up to date :
+ * https://w3challs.com/syscalls/
+ *
+ */
+
+/* Some archs (at least aarch64) don't expose the regular syscalls anymore by
* default, either because they have an "_at" replacement, or because there are
* more modern alternatives. For now we'd rather still use them.
*/
@@ -19,18 +97,6 @@
#define NOLIBC
-/* Build a static executable this way :
- * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
- * -static -include nolibc.h -lgcc -o hello hello.c
- *
- * Useful calling convention table found here :
- * http://man7.org/linux/man-pages/man2/syscall.2.html
- *
- * This doc is even better :
- * https://w3challs.com/syscalls/
- */
-
-
/* this way it will be removed if unused */
static int errno;
@@ -81,9 +147,9 @@ typedef signed long time_t;
/* for poll() */
struct pollfd {
- int fd;
- short int events;
- short int revents;
+ int fd;
+ short int events;
+ short int revents;
};
/* for select() */
@@ -239,7 +305,7 @@ struct stat {
"syscall\n" \
: "=a" (_ret) \
: "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -255,7 +321,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -272,7 +338,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -290,7 +356,7 @@ struct stat {
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
"0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
); \
_ret; \
})
@@ -1006,7 +1072,7 @@ struct sys_stat_struct {
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1025,7 +1091,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1045,7 +1111,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1066,7 +1132,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1087,7 +1153,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1110,7 +1176,7 @@ struct sys_stat_struct {
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
: "memory", "cc", "at", "v1", "hi", "lo", \
- \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
); \
_arg4 ? -_num : _num; \
})
@@ -1177,6 +1243,200 @@ struct sys_stat_struct {
long st_pad4[14];
};
+#elif defined(__riscv)
+
+#if __riscv_xlen == 64
+#define PTRLOG "3"
+#define SZREG "8"
+#elif __riscv_xlen == 32
+#define PTRLOG "2"
+#define SZREG "4"
+#endif
+
+/* Syscalls for RISCV :
+ * - stack is 16-byte aligned
+ * - syscall number is passed in a7
+ * - arguments are in a0, a1, a2, a3, a4, a5
+ * - the system call is performed by calling ecall
+ * - syscall return comes in a0
+ * - the arguments are cast to long and assigned into the target
+ * registers which are then simply passed as registers to the asm code,
+ * so that we don't have to experience issues with register constraints.
+ */
+
+#define my_syscall0(num) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0"); \
+ \
+ asm volatile ( \
+ "ecall\n\t" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ \
+ asm volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ \
+ asm volatile ( \
+ "ecall\n\t" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3") = (long)(arg4); \
+ register long _arg5 asm("a4") = (long)(arg5); \
+ \
+ asm volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num asm("a7") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3") = (long)(arg4); \
+ register long _arg5 asm("a4") = (long)(arg5); \
+ register long _arg6 asm("a5") = (long)(arg6); \
+ \
+ asm volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+/* startup code */
+asm(".section .text\n"
+ ".global _start\n"
+ "_start:\n"
+ ".option push\n"
+ ".option norelax\n"
+ "lla gp, __global_pointer$\n"
+ ".option pop\n"
+ "ld a0, 0(sp)\n" // argc (a0) was in the stack
+ "add a1, sp, "SZREG"\n" // argv (a1) = sp
+ "slli a2, a0, "PTRLOG"\n" // envp (a2) = SZREG*argc ...
+ "add a2, a2, "SZREG"\n" // + SZREG (skip null)
+ "add a2,a2,a1\n" // + argv
+ "andi sp,a1,-16\n" // sp must be 16-byte aligned
+ "call main\n" // main() returns the status code, we'll exit with it.
+ "andi a0, a0, 0xff\n" // limit exit code to 8 bits
+ "li a7, 93\n" // NR_exit == 93
+ "ecall\n"
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_CREAT 0x100
+#define O_EXCL 0x200
+#define O_NOCTTY 0x400
+#define O_TRUNC 0x1000
+#define O_APPEND 0x2000
+#define O_NONBLOCK 0x4000
+#define O_DIRECTORY 0x200000
+
+struct sys_stat_struct {
+ unsigned long st_dev; /* Device. */
+ unsigned long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long st_rdev; /* Device number, if device. */
+ unsigned long __pad1;
+ long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ int __pad2;
+ long st_blocks; /* Number 512-byte blocks allocated. */
+ long st_atime; /* Time of last access. */
+ unsigned long st_atime_nsec;
+ long st_mtime; /* Time of last modification. */
+ unsigned long st_mtime_nsec;
+ long st_ctime; /* Time of last status change. */
+ unsigned long st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
#endif
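To make the intended use concrete, a minimal program built against this header could look like the following (hello.c is illustrative); the build command is the one documented in the header comment above:

    /* hello.c - minimal nolibc example; nolibc.h is force-included at build time */
    int main(void)
    {
            static const char msg[] = "hello, nolibc\n";

            write(1, msg, sizeof(msg) - 1);
            return 0;
    }

    $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
          -static -include nolibc.h -lgcc -o hello hello.c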
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644
index 000000000000..af7d0d3a3182
--- /dev/null
+++ b/tools/include/uapi/asm-generic/mman-common-tools.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
+
+#include <asm-generic/mman-common.h>
+
+/* We need this because we need to have tools/include/uapi/ included in the tools
+ * header search path to get access to stuff that is not yet in the system's
+ * copy of the files in that directory, but since this cset:
+ *
+ * 746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
+ *
+ * We end up making sys/mman.h, that is in the system headers, to not find the
+ * MAP_SHARED and MAP_PRIVATE defines because they are not anymore in our copy
+ * of asm-generic/mman-common.h. So we define them here and include this header
+ * from each of the per arch mman.h headers.
+ */
+#ifndef MAP_SHARED
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#endif
+#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index e7ee32861d51..abd238d0f7a4 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -15,9 +15,7 @@
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 653687d9771b..36c197fc44a0 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_MMAN_H
#define __ASM_GENERIC_MMAN_H
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman-common-tools.h>
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index d90127298f12..a87904daf103 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
-__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
+__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
+#endif
/* fs/xattr.c */
#define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
#define __NR_fchown 55
__SYSCALL(__NR_fchown, sys_fchown)
#define __NR_openat 56
-__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
+__SYSCALL(__NR_openat, sys_openat)
#define __NR_close 57
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
/* fs/select.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
-__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
+__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
#define __NR_ppoll 73
-__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
+__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
+#endif
/* fs/signalfd.c */
#define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
-__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
- compat_sys_timerfd_settime)
+__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
+ sys_timerfd_settime)
#define __NR_timerfd_gettime 87
-__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
- compat_sys_timerfd_gettime)
+__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
+ sys_timerfd_gettime)
+#endif
/* fs/utimes.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
-__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
+__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
+#endif
/* kernel/acct.c */
#define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
__SYSCALL(__NR_unshare, sys_unshare)
/* kernel/futex.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
-__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
+__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
+#endif
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
/* kernel/hrtimer.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
-__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
+__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
+#endif
/* kernel/itimer.c */
#define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
-__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
+__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
+#endif
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
-__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
+__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
+#endif
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
-__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
+__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
#define __NR_clock_gettime 113
-__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
+__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
#define __NR_clock_getres 114
-__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
+__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
#define __NR_clock_nanosleep 115
-__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
- compat_sys_clock_nanosleep)
+__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
+ sys_clock_nanosleep)
+#endif
/* kernel/printk.c */
#define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
-__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
- compat_sys_sched_rr_get_interval)
+__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
+ sys_sched_rr_get_interval)
+#endif
/* kernel/signal.c */
#define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
-__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
- compat_sys_rt_sigtimedwait)
+__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
+ sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
+#endif
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
__SYSCALL(__NR_sethostname, sys_sethostname)
#define __NR_setdomainname 162
__SYSCALL(__NR_setdomainname, sys_setdomainname)
+
+#ifdef __ARCH_WANT_SET_GET_RLIMIT
+/* getrlimit and setrlimit are superseded with prlimit64 */
#define __NR_getrlimit 163
__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
#define __NR_setrlimit 164
__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
+#endif
+
#define __NR_getrusage 165
__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
#define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
__SYSCALL(__NR_getcpu, sys_getcpu)
/* kernel/time.c */
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
#define __NR_settimeofday 170
__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
#define __NR_adjtimex 171
-__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
+__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
+#endif
/* kernel/timer.c */
#define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
-__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
+__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
#define __NR_mq_timedreceive 183
-__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
- compat_sys_mq_timedreceive)
+__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
+ sys_mq_timedreceive)
+#endif
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
+__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+#endif
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
-__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
+__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
+#endif
/*
* Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
*/
#define __NR_arch_specific_syscall 244
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
+#endif
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
#define __NR_name_to_handle_at 264
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
-__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
- compat_sys_open_by_handle_at)
+__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
-__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
+__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
+#endif
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -734,15 +772,81 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
-__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
+#endif
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+#if __BITS_PER_LONG == 32
+#define __NR_clock_gettime64 403
+__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
+#define __NR_clock_settime64 404
+__SYSCALL(__NR_clock_settime64, sys_clock_settime)
+#define __NR_clock_adjtime64 405
+__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
+#define __NR_clock_getres_time64 406
+__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
+#define __NR_clock_nanosleep_time64 407
+__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
+#define __NR_timer_gettime64 408
+__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
+#define __NR_timer_settime64 409
+__SYSCALL(__NR_timer_settime64, sys_timer_settime)
+#define __NR_timerfd_gettime64 410
+__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
+#define __NR_timerfd_settime64 411
+__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
+#define __NR_utimensat_time64 412
+__SYSCALL(__NR_utimensat_time64, sys_utimensat)
+#define __NR_pselect6_time64 413
+__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
+#define __NR_ppoll_time64 414
+__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
+#define __NR_io_pgetevents_time64 416
+__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+#define __NR_recvmmsg_time64 417
+__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
+#define __NR_mq_timedsend_time64 418
+__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
+#define __NR_mq_timedreceive_time64 419
+__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
+#define __NR_semtimedop_time64 420
+__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
+#define __NR_rt_sigtimedwait_time64 421
+__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
+#define __NR_futex_time64 422
+__SYSCALL(__NR_futex_time64, sys_futex)
+#define __NR_sched_rr_get_interval_time64 423
+__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
+#endif
+
+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+#define __NR_open_tree 428
+__SYSCALL(__NR_open_tree, sys_open_tree)
+#define __NR_move_mount 429
+__SYSCALL(__NR_move_mount, sys_move_mount)
+#define __NR_fsopen 430
+__SYSCALL(__NR_fsopen, sys_fsopen)
+#define __NR_fsconfig 431
+__SYSCALL(__NR_fsconfig, sys_fsconfig)
+#define __NR_fsmount 432
+__SYSCALL(__NR_fsmount, sys_fsmount)
+#define __NR_fspick 433
+__SYSCALL(__NR_fspick, sys_fspick)
#undef __NR_syscalls
-#define __NR_syscalls 295
+#define __NR_syscalls 434
/*
* 32 bit systems traditionally used different
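Since libc wrappers usually lag behind new syscall numbers, entries such as pidfd_send_signal are typically exercised through syscall(2) directly. A hedged sketch (on kernels of this generation the pidfd is obtained by opening the target's /proc directory, and the flags argument must be zero):

    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_pidfd_send_signal
    #define __NR_pidfd_send_signal 424
    #endif

    int main(int argc, char **argv)
    {
            int pidfd;

            if (argc < 2)
                    return 1;
            pidfd = open(argv[1], O_DIRECTORY | O_CLOEXEC);  /* e.g. "/proc/1234" */
            if (pidfd < 0)
                    return 1;
            if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
                    perror("pidfd_send_signal");
            return 0;
    }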
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 300f336633f2..661d73f9a919 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -649,6 +649,7 @@ struct drm_gem_open {
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
#define DRM_CAP_SYNCOBJ 0x13
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -735,8 +736,18 @@ struct drm_syncobj_handle {
__u32 pad;
};
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -747,12 +758,33 @@ struct drm_syncobj_wait {
__u32 pad;
};
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+ /* wait on a specific timeline point for each handle */
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+
struct drm_syncobj_array {
__u64 handles;
__u32 count_handles;
__u32 pad;
};
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+
/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
__u32 crtc_id; /* requested crtc_id */
@@ -909,6 +941,11 @@ extern "C" {
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
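A hedged sketch of how userspace might drive the new timeline wait ioctl; the handle and point values are placeholders and the header include path may differ per setup:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>            /* path may vary, e.g. libdrm/drm.h */

    /* Sketch only: block until the syncobj reaches timeline point 42. */
    static int wait_for_point(int drm_fd, uint32_t handle, int64_t deadline_ns)
    {
            uint64_t point = 42;
            struct drm_syncobj_timeline_wait wait;

            memset(&wait, 0, sizeof(wait));
            wait.handles = (uintptr_t)&handle;
            wait.points = (uintptr_t)&point;
            wait.count_handles = 1;
            wait.timeout_nsec = deadline_ns;        /* absolute timeout, as above */
            wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

            return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
    }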
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 298b2e197744..3a73f5316766 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -63,6 +63,28 @@ extern "C" {
#define I915_RESET_UEVENT "RESET"
/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+ __u64 next_extension;
+ __u32 name;
+ __u32 flags; /* All undefined bits must be zero. */
+ __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
+/*
* MOCS indexes used for GPU surfaces, defining the cacheability of the
* surface data and the coherency for this data wrt. CPU vs. GPU accesses.
*/
@@ -99,9 +121,23 @@ enum drm_i915_gem_engine_class {
I915_ENGINE_CLASS_VIDEO = 2,
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+ /* should be kept compact */
+
I915_ENGINE_CLASS_INVALID = -1
};
+/*
+ * There may be more than one engine fulfilling any role within the system.
+ * Each engine of a class is given a unique instance number and therefore
+ * any engine can be specified by its class:instance tuplet. APIs that allow
+ * access to any engine in the system will use struct i915_engine_class_instance
+ * for this identification.
+ */
+struct i915_engine_class_instance {
+ __u16 engine_class; /* see enum drm_i915_gem_engine_class */
+ __u16 engine_instance;
+};
+
/**
* DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
*
@@ -319,6 +355,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -367,6 +404,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
@@ -476,6 +514,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_PARAM_HUC_STATUS 42
@@ -559,6 +598,8 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/* Must be kept compact -- no holes and well documented */
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -574,6 +615,7 @@ typedef struct drm_i915_getparam {
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
+/* Must be kept compact -- no holes */
typedef struct drm_i915_setparam {
int param;
@@ -972,7 +1014,7 @@ struct drm_i915_gem_execbuffer2 {
* struct drm_i915_gem_exec_fence *fences.
*/
__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
@@ -1120,32 +1162,34 @@ struct drm_i915_gem_busy {
* as busy may become idle before the ioctl is completed.
*
* Furthermore, if the object is busy, which engine is busy is only
- * provided as a guide. There are race conditions which prevent the
- * report of which engines are busy from being always accurate.
- * However, the converse is not true. If the object is idle, the
- * result of the ioctl, that all engines are idle, is accurate.
+ * provided as a guide and only indirectly by reporting its class
+ * (there may be more than one engine in each class). There are race
+ * conditions which prevent the report of which engines are busy from
+ * being always accurate. However, the converse is not true. If the
+ * object is idle, the result of the ioctl, that all engines are idle,
+ * is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engines on which the object is being read, and the
- * engine on which it is currently being written (if any).
+ * the engine classes on which the object is being read, and the
+ * engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
* to by any engine (there can only be one, as the GEM implicit
* synchronisation rules force writes to be serialised). Only the
- * engine for the last write is reported.
+ * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+ * 1 not 0 etc) for the last write is reported.
*
- * The high word (bits 16:31) are a bitmask of which engines are
- * currently reading from the object. Multiple engines may be
+ * The high word (bits 16:31) are a bitmask of which engine classes
+ * are currently reading from the object. Multiple engines may be
* reading from the object simultaneously.
*
- * The value of each engine is the same as specified in the
- * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
- * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
- * the I915_EXEC_RENDER engine for execution, and so it is never
+ * The value of each engine class is the same as specified in the
+ * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
* reported as active itself. Some hardware may have parallel
* execution engines, e.g. multiple media engines, which are
- * mapped to the same identifier in the EXECBUFFER2 ioctl and
- * so are not separately reported for busyness.
+ * mapped to the same class identifier and so are not separately
+ * reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1412,16 +1456,158 @@ struct drm_i915_gem_wait {
};
struct drm_i915_gem_context_create {
- /* output: id of new context*/
- __u32 ctx_id;
+ __u32 ctx_id; /* output: id of new context*/
__u32 pad;
};
+struct drm_i915_gem_context_create_ext {
+ __u32 ctx_id; /* output: id of new context*/
+ __u32 flags;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+ (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ __u64 extensions;
+};
+
+struct drm_i915_gem_context_param {
+ __u32 ctx_id;
+ __u32 size;
+ __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
+#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
+ /*
+ * When using the following param, value should be a pointer to
+ * drm_i915_gem_context_param_sseu.
+ */
+#define I915_CONTEXT_PARAM_SSEU 0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+/* Must be kept compact -- no holes and well documented */
+
+ __u64 value;
+};
+
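For illustration, a userspace sketch that opts an existing context out of automatic recovery; it assumes the pre-existing DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM ioctl from this header and omits error handling:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int context_set_unrecoverable(int drm_fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_RECOVERABLE;
	p.value = 0;	/* ban the context instead of replaying after a hang */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}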
+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reasons to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by providing an SSEU configuration, using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, flexibility of possible SSEU configuration permutations varies between
+ * GPU generations and software imposed limitations. Requesting such a
+ * combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+ /*
+ * Engine class & instance to be configured or queried.
+ */
+ struct i915_engine_class_instance engine;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * Mask of slices to enable for the context. Valid values are a subset
+ * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+ */
+ __u64 slice_mask;
+
+ /*
+ * Mask of subslices to enable for the context. Valid values are a
+ * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
+ */
+ __u64 subslice_mask;
+
+ /*
+ * Minimum/Maximum number of EUs to enable per subslice for the
+ * context. min_eus_per_subslice must be less than or equal to
+ * max_eus_per_subslice.
+ */
+ __u16 min_eus_per_subslice;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 rsvd;
+};
+
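A hedged sketch of restricting the render engine's SSEU for a context: the masks and EU counts below are purely illustrative (real values derive from the I915_PARAM_SLICE_MASK / I915_PARAM_SUBSLICE_MASK getparams), and the pre-existing setparam ioctl is assumed as above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int context_limit_sseu(int drm_fd, __u32 ctx_id, __u64 slice_mask,
			      __u64 subslice_mask)
{
	struct drm_i915_gem_context_param_sseu sseu;
	struct drm_i915_gem_context_param p;

	memset(&sseu, 0, sizeof(sseu));
	sseu.engine.engine_class = I915_ENGINE_CLASS_RENDER;
	sseu.engine.engine_instance = 0;
	sseu.slice_mask = slice_mask;		/* subset of I915_PARAM_SLICE_MASK */
	sseu.subslice_mask = subslice_mask;	/* subset of I915_PARAM_SUBSLICE_MASK */
	sseu.min_eus_per_subslice = 1;		/* illustrative values only */
	sseu.max_eus_per_subslice = 1;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_SSEU;
	p.size = sizeof(sseu);
	p.value = (uintptr_t)&sseu;
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}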
+struct drm_i915_gem_context_create_ext_setparam {
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ struct i915_user_extension base;
+ struct drm_i915_gem_context_param param;
+};
+
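Putting the extension machinery together, a sketch of creating a context with a parameter applied at creation time through the chained setparam extension; it uses the DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT number defined earlier in this patch and omits error handling:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int context_create_unrecoverable(int drm_fd, __u32 *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam ext;
	struct drm_i915_gem_context_create_ext create;
	int ret;

	memset(&ext, 0, sizeof(ext));
	ext.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM;
	ext.base.next_extension = 0;		/* single-entry chain */
	ext.param.param = I915_CONTEXT_PARAM_RECOVERABLE;
	ext.param.value = 0;

	memset(&create, 0, sizeof(create));
	create.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS;
	create.extensions = (uintptr_t)&ext;

	ret = ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
	if (!ret)
		*ctx_id = create.ctx_id;
	return ret;
}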
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is setup upon creation.
+ *
+ * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @vm_id.
+ *
+ * No flags are currently defined; all bits are reserved and must be zero.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by the @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @vm_id.
+ *
+ * No extensions or flags are allowed currently, and so must be zero.
+ */
+struct drm_i915_gem_vm_control {
+ __u64 extensions;
+ __u32 flags;
+ __u32 vm_id;
+};
+
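A minimal sketch of the intended flow; the DRM_IOCTL_I915_GEM_VM_CREATE / DRM_IOCTL_I915_GEM_VM_DESTROY ioctl numbers are assumed to be defined alongside this struct (they are not part of this hunk), and error handling is omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static __u32 vm_create(int drm_fd)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));	/* no flags, no extensions */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return 0;
	return ctl.vm_id;		/* usable with I915_CONTEXT_PARAM_VM */
}

static void vm_destroy(int drm_fd, __u32 vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.vm_id = vm_id;
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
}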
struct drm_i915_reg_read {
/*
* Register offset.
@@ -1434,6 +1620,7 @@ struct drm_i915_reg_read {
__u64 val; /* Return value */
};
+
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
@@ -1473,22 +1660,6 @@ struct drm_i915_gem_userptr {
__u32 handle;
};
-struct drm_i915_gem_context_param {
- __u32 ctx_id;
- __u32 size;
- __u64 param;
-#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
-#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
-#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
-#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
-#define I915_CONTEXT_PARAM_BANNABLE 0x5
-#define I915_CONTEXT_PARAM_PRIORITY 0x6
-#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
-#define I915_CONTEXT_DEFAULT_PRIORITY 0
-#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
- __u64 value;
-};
-
enum drm_i915_oa_format {
I915_OA_FORMAT_A13 = 1, /* HSW only */
I915_OA_FORMAT_A29, /* HSW only */
@@ -1650,6 +1821,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+/* Must be kept compact -- no holes and well documented */
/*
* When set to zero by userspace, this is filled with the size of the
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 91c43884f295..a8b823c30b43 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -14,6 +14,7 @@
/* Extended instruction set based on top of classic BPF */
/* instruction classes */
+#define BPF_JMP32 0x06 /* jmp mode in word width */
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
@@ -104,6 +105,7 @@ enum bpf_cmd {
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+ BPF_MAP_FREEZE,
};
enum bpf_map_type {
@@ -131,6 +133,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_SK_STORAGE,
};
/* Note that tracing related programs such as
@@ -165,6 +168,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
BPF_PROG_TYPE_FLOW_DISSECTOR,
+ BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
};
enum bpf_attach_type {
@@ -186,6 +191,9 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
BPF_FLOW_DISSECTOR,
+ BPF_CGROUP_SYSCTL,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
__MAX_BPF_ATTACH_TYPE
};
@@ -254,8 +262,19 @@ enum bpf_attach_type {
*/
#define BPF_F_ANY_ALIGNMENT (1U << 1)
-/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
+/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
+ * two extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm: map fd map fd
+ * insn[1].imm: 0 offset into value
+ * insn[0].off: 0 0
+ * insn[1].off: 0 0
+ * ldimm64 rewrite: address of map address of map[0]+offset
+ * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ */
#define BPF_PSEUDO_MAP_FD 1
+#define BPF_PSEUDO_MAP_VALUE 2
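As a concrete reading of the table above, a hand-assembled two-slot ldimm64 for the BPF_PSEUDO_MAP_VALUE form might look like the sketch below; loaders such as libbpf normally emit this on the program author's behalf:

#include <linux/bpf.h>

/* Load "address of map[0] + off" into r1 via the two-slot ldimm64. */
static void ldimm64_map_value(struct bpf_insn insn[2], int map_fd, __u32 off)
{
	insn[0] = (struct bpf_insn) {
		.code    = BPF_LD | BPF_DW | BPF_IMM,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_PSEUDO_MAP_VALUE,
		.imm     = map_fd,		/* insn[0].imm: map fd */
	};
	insn[1] = (struct bpf_insn) {
		.imm = off,			/* insn[1].imm: offset into value */
	};
}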
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
@@ -266,6 +285,7 @@ enum bpf_attach_type {
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
+#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
@@ -281,7 +301,7 @@ enum bpf_attach_type {
#define BPF_OBJ_NAME_LEN 16U
-/* Flags for accessing BPF object */
+/* Flags for accessing BPF object from syscall side. */
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
@@ -291,6 +311,10 @@ enum bpf_attach_type {
/* Zero-initialize hash function seed. This should only be used for testing. */
#define BPF_F_ZERO_SEED (1U << 6)
+/* Flags for accessing BPF object from program side. */
+#define BPF_F_RDONLY_PROG (1U << 7)
+#define BPF_F_WRONLY_PROG (1U << 8)
+
/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
@@ -394,6 +418,13 @@ union bpf_attr {
__aligned_u64 data_out;
__u32 repeat;
__u32 duration;
+ __u32 ctx_size_in; /* input: len of ctx_in */
+ __u32 ctx_size_out; /* input/output: len of ctx_out
+ * returns ENOSPC if ctx_out
+ * is too small.
+ */
+ __aligned_u64 ctx_in;
+ __aligned_u64 ctx_out;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -500,16 +531,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * Description
- * Push an element *value* in *map*. *flags* is one of:
- *
- * **BPF_EXIST**
- * If the queue/stack is full, the oldest element is removed to
- * make room for this.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -610,7 +631,7 @@ union bpf_attr {
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
* **->swhash** and *skb*\ **->l4hash** to 0).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -635,7 +656,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -667,7 +688,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -722,7 +743,7 @@ union bpf_attr {
* efficient, but it is handled through an action code where the
* redirection happens only after the eBPF program has returned.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -787,7 +808,7 @@ union bpf_attr {
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
* be **ETH_P_8021Q**.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -799,7 +820,7 @@ union bpf_attr {
* Description
* Pop a VLAN header from the packet associated to *skb*.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1149,7 +1170,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must
* be left at zero.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1262,7 +1283,7 @@ union bpf_attr {
* implicitly linearizes, unclones and drops offloads from the
* *skb*.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1298,7 +1319,7 @@ union bpf_attr {
* **bpf_skb_pull_data()** to effectively unclone the *skb* from
* the very beginning in case it is indeed cloned.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1350,7 +1371,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must
* be left at zero.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1365,7 +1386,7 @@ union bpf_attr {
* can be used to prepare the packet for pushing or popping
* headers.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1433,14 +1454,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* A 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* A 8-byte long non-decreasing number.
*
@@ -1486,15 +1507,33 @@ union bpf_attr {
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
*
- * There is a single supported mode at this time:
+ * There are two supported modes at this time:
+ *
+ * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
+ * (room space is added or removed below the layer 2 header).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
* (room space is added or removed below the layer 3 header).
*
- * All values for *flags* are reserved for future usage, and must
- * be left at zero.
+ * The following flags are supported at this time:
+ *
+ * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
+ * Adjusting mss in this way is not allowed for datagrams.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
+ * Any new space is reserved to hold a tunnel header.
+ * Configure skb offsets and other fields accordingly.
*
- * A call to this helper is susceptible to change the underlaying
+ * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
+ * Use with ENCAP_L3 flags to further specify the tunnel type.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
+ * Use with ENCAP_L3/L4 flags to further specify the tunnel
+ * type; *len* is the length of the inner MAC header.
+ *
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1573,7 +1612,7 @@ union bpf_attr {
* more flexibility as the user is free to store whatever meta
* data they need.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1702,12 +1741,19 @@ union bpf_attr {
* error if an eBPF program tries to set a callback that is not
* supported in the current kernel.
*
- * The supported callback values that *argval* can combine are:
+ * *argval* is a flag array which can combine these flags:
*
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
*
+ * Therefore, this function can be used to clear a callback flag by
+ * setting the appropriate bit to zero. e.g. to disable the RTO
+ * callback:
+ *
+ * **bpf_sock_ops_cb_flags_set(bpf_sock,**
+ * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
+ *
* Here are some examples of where one could call such eBPF
* program:
*
@@ -1808,7 +1854,7 @@ union bpf_attr {
* copied if necessary (i.e. if data was not linear and if start
* and end pointers do not point to the same chunk).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1842,7 +1888,7 @@ union bpf_attr {
* only possible to shrink the packet as of this writing,
* therefore *delta* must be a negative integer.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2014,8 +2060,21 @@ union bpf_attr {
* Only works if *skb* contains an IPv6 packet. Insert a
* Segment Routing Header (**struct ipv6_sr_hdr**) inside
* the IPv6 header.
- *
- * A call to this helper is susceptible to change the underlaying
+ * **BPF_LWT_ENCAP_IP**
+ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header
+ * must be IPv4 or IPv6, followed by zero or more
+ * additional headers, up to **LWT_BPF_MAX_HEADROOM**
+ * total bytes in all prepended headers. Please note that
+ * if **skb_is_gso**\ (*skb*) is true, no more than two
+ * headers can be prepended, and the inner header, if
+ * present, should be either GRE or UDP/GUE.
+ *
+ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
+ * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
+ * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
+ * **BPF_PROG_TYPE_LWT_XMIT**.
+ *
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2030,7 +2089,7 @@ union bpf_attr {
* inside the outermost IPv6 Segment Routing Header can be
* modified through this helper.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2046,7 +2105,7 @@ union bpf_attr {
* after the segments are accepted. *delta* can be as well
* positive (growing) as negative (shrinking).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2069,13 +2128,13 @@ union bpf_attr {
* Type of *param*: **int**.
* **SEG6_LOCAL_ACTION_END_B6**
* End.B6 action: Endpoint bound to an SRv6 policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
* **SEG6_LOCAL_ACTION_END_B6_ENCAP**
* End.B6.Encap action: Endpoint bound to an SRv6
* encapsulation policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2083,52 +2142,52 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded key press with *scancode*,
- * *toggle* value in the given *protocol*. The scancode will be
- * translated to a keycode using the rc keymap, and reported as
- * an input key down event. After a period a key up event is
- * generated. This period can be extended by calling either
- * **bpf_rc_keydown**\ () again with the same values, or calling
- * **bpf_rc_repeat**\ ().
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
*
- * Some protocols include a toggle bit, in case the button was
- * released and pressed again between consecutive scancodes.
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
- * The *protocol* is the decoded protocol number (see
- * **enum rc_proto** for some predefined values).
- *
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded repeat key message. This delays
- * the generation of a key up event for previously generated
- * key down event.
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
- * Some IR protocols like NEC have a special IR message for
- * repeating last button, for when a button is held down.
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
 * This helper is only available if the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2144,30 +2203,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * Description
- * Return id of cgroup v2 that is ancestor of cgroup associated
- * with the *skb* at the *ancestor_level*. The root cgroup is at
- * *ancestor_level* zero and each step down the hierarchy
- * increments the level. If *ancestor_level* == level of cgroup
- * associated with *skb*, then return value will be same as that
- * of **bpf_skb_cgroup_id**\ ().
- *
- * The helper is useful to implement policies based on cgroups
- * that are upper in hierarchy than immediate cgroup associated
- * with *skb*.
- *
- * The format of returned id and helper limitations are same as in
- * **bpf_skb_cgroup_id**\ ().
- * Return
- * The id is returned or 0 in case the id could not be retrieved.
- *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage is defined
@@ -2194,6 +2235,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@@ -2228,7 +2287,8 @@ union bpf_attr {
* Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
*
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
@@ -2264,7 +2324,8 @@ union bpf_attr {
* Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
*
* int bpf_sk_release(struct bpf_sock *sock)
* Description
@@ -2274,6 +2335,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* in *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
@@ -2327,6 +2398,282 @@ union bpf_attr {
* "**y**".
* Return
* 0
+ *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * Description
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a value of a map. Taking the lock allows to
+ * safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, do not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * Return
+ * 0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * Description
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ * Return
+ * 0
+ *
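A sketch of a program that obeys the rules listed above, assuming a recent libbpf bpf/bpf_helpers.h (BTF-defined map syntax, which also satisfies the mandatory BTF requirement) and a tc classifier attach point:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val {
	struct bpf_spin_lock lock;	/* top-level field, 4-byte aligned */
	__u64 packets;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val);
} stats SEC(".maps");

SEC("classifier")
int count_pkts(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct val *v = bpf_map_lookup_elem(&stats, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);
	v->packets++;			/* the lock protects the rest of the value */
	bpf_spin_unlock(&v->lock);
	return 0;
}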
+ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_sock** pointer such
+ * that all the fields in this **bpf_sock** can be accessed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ *
+ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
+ * Description
+ * This helper gets a **struct bpf_tcp_sock** pointer from a
+ * **struct bpf_sock** pointer.
+ * Return
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
+ * case of failure.
+ *
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * Description
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ * Return
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ * Description
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ *
+ * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ * Description
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * This function is identical to **bpf_sk_lookup_tcp**\ (), except
+ * that it also returns timewait or request sockets. Use
+ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
+ * full structure.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ *
+ * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK for
+ * the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ip6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains **sizeof**\ (**struct tcphdr**).
+ *
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
+ * error otherwise.
+ *
+ * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * Description
+ * Get name of sysctl in /proc/sys/ and copy it into the buffer
+ * *buf* of size *buf_len* provided by the program.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
+ * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
+ * only (e.g. "tcp_mem").
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * truncated name in this case).
+ *
+ * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * Description
+ * Get current value of sysctl as it is presented in /proc/sys
+ * (incl. newline, etc), and copy it as a string into the buffer
+ * *buf* of size *buf_len* provided by the program.
+ *
+ * The whole value is copied, no matter what file position user
+ * space issued e.g. sys_read at.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if current value was unavailable, e.g. because
+ * sysctl is uninitialized and read returns -EIO for it.
+ *
+ * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * Description
+ * Get new value being written by user space to sysctl (before
+ * the actual write happens) and copy it as a string into the
+ * buffer *buf* of size *buf_len* provided by the program.
+ *
+ * User space may write new value at file position > 0.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if sysctl is being read.
+ *
+ * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * Description
+ * Override new value being written by user space to sysctl with
+ * value provided by program in buffer *buf* of size *buf_len*.
+ *
+ * *buf* should contain a string in same form as provided by user
+ * space on sysctl write.
+ *
+ * User space may write new value at file position > 0. To override
+ * the whole sysctl value, the file position should be set to zero.
+ * Return
+ * 0 on success.
+ *
+ * **-E2BIG** if the *buf_len* is too big.
+ *
+ * **-EINVAL** if sysctl is being read.
+ *
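To make the sysctl hooks concrete, here is a hedged sketch of a BPF_PROG_TYPE_CGROUP_SYSCTL program (attach type BPF_CGROUP_SYSCTL) that rejects writes to a sysctl whose base name is "tcp_mem"; libbpf's bpf/bpf_helpers.h is assumed:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int deny_tcp_mem_writes(struct bpf_sysctl *ctx)
{
	char name[8] = {};
	const char want[8] = "tcp_mem";
	int i;

	if (!ctx->write)
		return 1;	/* reads are always allowed here */
	bpf_sysctl_get_name(ctx, name, sizeof(name), BPF_F_SYSCTL_BASE_NAME);
#pragma clang loop unroll(full)
	for (i = 0; i < 8; i++)
		if (name[i] != want[i])
			return 1;	/* some other sysctl: allow */
	return 0;			/* exact match on a write: reject */
}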
+ * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * Description
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to a long integer according to the given base
+ * and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)) followed by a single
+ * optional '**-**' sign.
+ *
+ * Five least significant bits of *flags* encode base, other bits
+ * are currently unused.
+ *
+ * Base must be either 8, 10, 16 or 0 to detect it automatically
+ * similar to user space **strtol**\ (3).
+ * Return
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ *
+ * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * Description
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to an unsigned long integer according to the
+ * given base and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)).
+ *
+ * Five least significant bits of *flags* encode base, other bits
+ * are currently unused.
+ *
+ * Base must be either 8, 10, 16 or 0 to detect it automatically
+ * similar to user space **strtoul**\ (3).
+ * Return
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ *
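Continuing that sketch, bpf_sysctl_get_new_value() and bpf_strtoul() can be combined to cap the value user space is writing; the limit below is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int cap_written_value(struct bpf_sysctl *ctx)
{
	char buf[16] = {};
	unsigned long newval = 0;
	int len;

	if (!ctx->write)
		return 1;			/* reads are always allowed */
	len = bpf_sysctl_get_new_value(ctx, buf, sizeof(buf));
	if (len <= 0)
		return 0;
	/* flags 10 == base 10 (the base lives in the five low bits of flags) */
	if (bpf_strtoul(buf, len, 10, &newval) <= 0)
		return 0;
	return newval <= 1000000 ? 1 : 0;	/* reject writes above the cap */
}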
+ * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * Description
+ * Get a bpf-local-storage from a *sk*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this
+ * helper enforces the key must be a full socket and the map must
+ * be a **BPF_MAP_TYPE_SK_STORAGE** also.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf-local-storages residing at *sk*.
+ *
+ * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf-local-storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf-local-storage. If *value* is
+ * **NULL**, the new bpf-local-storage will be zero initialized.
+ * Return
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf-local-storage.
+ *
+ * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * Description
+ * Delete a bpf-local-storage from a *sk*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
*/
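A sketch of the socket-local storage helpers from a cgroup skb program; it assumes a recent libbpf bpf/bpf_helpers.h and relies on the new __sk_buff sk field added further down in this patch (the map must be BPF_MAP_TYPE_SK_STORAGE with BPF_F_NO_PREALLOC):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct pkt_stats { __u64 tx; };

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct pkt_stats);
} sk_stats SEC(".maps");

SEC("cgroup_skb/egress")
int count_tx(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct pkt_stats *s;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);	/* storage requires a full socket */
	if (!sk)
		return 1;
	s = bpf_sk_storage_get(&sk_stats, sk, 0, BPF_SK_STORAGE_GET_F_CREATE);
	if (s)
		s->tx++;		/* per-socket counter, zero-initialized on create */
	return 1;			/* always allow the packet */
}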
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2421,7 +2768,23 @@ union bpf_attr {
FN(map_peek_elem), \
FN(msg_push_data), \
FN(msg_pop_data), \
- FN(rc_pointer_rel),
+ FN(rc_pointer_rel), \
+ FN(spin_lock), \
+ FN(spin_unlock), \
+ FN(sk_fullsock), \
+ FN(tcp_sock), \
+ FN(skb_ecn_set_ce), \
+ FN(get_listener_sock), \
+ FN(skc_lookup_tcp), \
+ FN(tcp_check_syncookie), \
+ FN(sysctl_get_name), \
+ FN(sysctl_get_current_value), \
+ FN(sysctl_get_new_value), \
+ FN(sysctl_set_new_value), \
+ FN(strtol), \
+ FN(strtoul), \
+ FN(sk_storage_get), \
+ FN(sk_storage_delete),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2480,9 +2843,30 @@ enum bpf_func_id {
/* Current network namespace */
#define BPF_F_CURRENT_NETNS (-1L)
+/* BPF_FUNC_skb_adjust_room flags. */
+#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+
+#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
+#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+
+#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
+#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
+#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
+#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
+#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
+ BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
+
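For example, a tc program reserving room for an outer IPv4 header plus a 4-byte base GRE header could compose these flags as in the sketch below (writing the outer headers themselves is elided; libbpf's bpf/bpf_helpers.h is assumed):

#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int gre4_reserve_room(struct __sk_buff *skb)
{
	/* Reserve space below the MAC header for an outer IPv4 header plus
	 * a 4-byte base GRE header, keeping gso_size fixed. */
	__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO |
		      BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE;

	if (bpf_skb_adjust_room(skb, sizeof(struct iphdr) + 4,
				BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;
	/* ... the outer headers themselves would then be written, e.g. with
	 * bpf_skb_store_bytes() ... */
	return TC_ACT_OK;
}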
+/* BPF_FUNC_sysctl_get_name flags. */
+#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+
+/* BPF_FUNC_sk_storage_get flags */
+#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
+ BPF_ADJ_ROOM_MAC,
};
/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
@@ -2494,7 +2878,8 @@ enum bpf_hdr_start_off {
/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6,
- BPF_LWT_ENCAP_SEG6_INLINE
+ BPF_LWT_ENCAP_SEG6_INLINE,
+ BPF_LWT_ENCAP_IP,
};
#define __bpf_md_ptr(type, name) \
@@ -2540,6 +2925,8 @@ struct __sk_buff {
__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
__u64 tstamp;
__u32 wire_len;
+ __u32 gso_segs;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
struct bpf_tunnel_key {
@@ -2581,7 +2968,15 @@ enum bpf_ret_code {
BPF_DROP = 2,
/* 3-6 reserved */
BPF_REDIRECT = 7,
- /* >127 are reserved for prog type specific return codes */
+ /* >127 are reserved for prog type specific return codes.
+ *
+ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and
+ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been
+ * changed and should be routed based on its new L3 header.
+ * (This is an L3 redirect, as opposed to L2 redirect
+ * represented by BPF_REDIRECT above).
+ */
+ BPF_LWT_REROUTE = 128,
};
struct bpf_sock {
@@ -2591,14 +2986,52 @@ struct bpf_sock {
__u32 protocol;
__u32 mark;
__u32 priority;
- __u32 src_ip4; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ /* IP address also allows 1 and 2 bytes access */
+ __u32 src_ip4;
+ __u32 src_ip6[4];
+ __u32 src_port; /* host byte order */
+ __u32 dst_port; /* network byte order */
+ __u32 dst_ip4;
+ __u32 dst_ip6[4];
+ __u32 state;
+};
+
+struct bpf_tcp_sock {
+ __u32 snd_cwnd; /* Sending congestion window */
+ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ __u32 rtt_min;
+ __u32 snd_ssthresh; /* Slow start size threshold */
+ __u32 rcv_nxt; /* What we want to receive next */
+ __u32 snd_nxt; /* Next sequence we send */
+ __u32 snd_una; /* First byte we want an ack for */
+ __u32 mss_cache; /* Cached effective mss, not including SACKS */
+ __u32 ecn_flags; /* ECN status bits. */
+ __u32 rate_delivered; /* saved rate sample: packets delivered */
+ __u32 rate_interval_us; /* saved rate sample: time elapsed */
+ __u32 packets_out; /* Packets which are "in flight" */
+ __u32 retrans_out; /* Retransmitted packets out */
+ __u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
*/
- __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
- * Stored in network byte order.
+ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
+ * total number of data segments in.
+ */
+ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ __u32 lost_out; /* Lost packets */
+ __u32 sacked_out; /* SACK'd packets */
+ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were received.
*/
- __u32 src_port; /* Allows 4-byte read.
- * Stored in host byte order
+ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
*/
};
@@ -2728,6 +3161,8 @@ struct bpf_prog_info {
__u32 jited_line_info_rec_size;
__u32 nr_prog_tags;
__aligned_u64 prog_tags;
+ __u64 run_time_ns;
+ __u64 run_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -2943,8 +3378,8 @@ struct bpf_raw_tracepoint_args {
/* DIRECT: Skip the FIB rules and go to FIB table associated with device
* OUTPUT: Do lookup from egress perspective; default is ingress
*/
-#define BPF_FIB_LOOKUP_DIRECT BIT(0)
-#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+#define BPF_FIB_LOOKUP_DIRECT (1U << 0)
+#define BPF_FIB_LOOKUP_OUTPUT (1U << 1)
enum {
BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
@@ -3054,4 +3489,17 @@ struct bpf_line_info {
__u32 line_col;
};
+struct bpf_spin_lock {
+ __u32 val;
+};
+
+struct bpf_sysctl {
+ __u32 write; /* Sysctl is being read (= 0) or written (= 1).
+ * Allows 1,2,4-byte read, but no write.
+ */
+ __u32 file_pos; /* Sysctl file position to read from, write to.
+ * Allows 1,2,4-byte read and 4-byte write.
+ */
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 7b7475ef2f17..63ae4a39e58b 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -39,11 +39,11 @@ struct btf_type {
* struct, union and fwd
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT and UNION.
+ /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC and FUNC_PROTO.
+ * FUNC, FUNC_PROTO and VAR.
* "type" is a type_id referring to another type.
*/
union {
@@ -70,8 +70,10 @@ struct btf_type {
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_MAX 13
-#define NR_BTF_KINDS 14
+#define BTF_KIND_VAR 14 /* Variable */
+#define BTF_KIND_DATASEC 15 /* Section */
+#define BTF_KIND_MAX BTF_KIND_DATASEC
+#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -81,7 +83,7 @@ struct btf_type {
* is the 32 bits arrangement:
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
-#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
+#define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16)
#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
@@ -138,4 +140,26 @@ struct btf_param {
__u32 type;
};
+enum {
+ BTF_VAR_STATIC = 0,
+ BTF_VAR_GLOBAL_ALLOCATED,
+};
+
+/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
+ * additional information related to the variable such as its linkage.
+ */
+struct btf_var {
+ __u32 linkage;
+};
+
+/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo"
+ * to describe all BTF_KIND_VAR types it contains along with it's
+ * in-section offset as well as size.
+ */
+struct btf_var_secinfo {
+ __u32 type;
+ __u32 offset;
+ __u32 size;
+};
+
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/tools/include/uapi/linux/ethtool.h b/tools/include/uapi/linux/ethtool.h
new file mode 100644
index 000000000000..c86c3e942df9
--- /dev/null
+++ b/tools/include/uapi/linux/ethtool.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * ethtool.h: Defines for Linux ethtool.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright 2001 Jeff Garzik <jgarzik@pobox.com>
+ * Portions Copyright 2001 Sun Microsystems (thockin@sun.com)
+ * Portions Copyright 2002 Intel (eli.kupermann@intel.com,
+ * christopher.leech@intel.com,
+ * scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
+ */
+
+#ifndef _UAPI_LINUX_ETHTOOL_H
+#define _UAPI_LINUX_ETHTOOL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */
+
+/**
+ * struct ethtool_channels - configuring number of network channels
+ * @cmd: ETHTOOL_{G,S}CHANNELS
+ * @max_rx: Read only. Maximum number of receive channels the driver supports.
+ * @max_tx: Read only. Maximum number of transmit channels the driver supports.
+ * @max_other: Read only. Maximum number of other channels the driver supports.
+ * @max_combined: Read only. Maximum number of combined channels the driver
+ * supports. A combined channel is a set of RX, TX or other queues.
+ * @rx_count: Valid values are in the range 1 to the max_rx.
+ * @tx_count: Valid values are in the range 1 to the max_tx.
+ * @other_count: Valid values are in the range 1 to the max_other.
+ * @combined_count: Valid values are in the range 1 to the max_combined.
+ *
+ * This can be used to configure RX, TX and other channels.
+ */
+
+struct ethtool_channels {
+ __u32 cmd;
+ __u32 max_rx;
+ __u32 max_tx;
+ __u32 max_other;
+ __u32 max_combined;
+ __u32 rx_count;
+ __u32 tx_count;
+ __u32 other_count;
+ __u32 combined_count;
+};
+
+#endif /* _UAPI_LINUX_ETHTOOL_H */
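For reference, this structure is driven through the standard SIOCETHTOOL ioctl; a userspace sketch, with error handling omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Query the channel counts of "ifname" using an already-open AF_INET socket. */
static int get_channels(int sock, const char *ifname, struct ethtool_channels *ch)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	memset(ch, 0, sizeof(*ch));
	ch->cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)ch;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}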
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 6448cdd9a350..1d338357df8a 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
+#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
/* (1U << 31) is reserved for signed error codes */
/*
@@ -90,5 +91,7 @@
#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 121e82ce296b..59c71fa8c553 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -320,6 +320,9 @@ struct fscrypt_key {
#define SYNC_FILE_RANGE_WAIT_BEFORE 1
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
+#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \
+ SYNC_FILE_RANGE_WAIT_BEFORE | \
+ SYNC_FILE_RANGE_WAIT_AFTER)
/*
* Flags for preadv2/pwritev2:
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index d6533828123a..5b225ff63b48 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -925,6 +925,7 @@ enum {
enum {
LINK_XSTATS_TYPE_UNSPEC,
LINK_XSTATS_TYPE_BRIDGE,
+ LINK_XSTATS_TYPE_BOND,
__LINK_XSTATS_TYPE_MAX
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
new file mode 100644
index 000000000000..caed8b1614ff
--- /dev/null
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * if_xdp: XDP socket user-space interface
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * Author(s): Björn Töpel <bjorn.topel@intel.com>
+ * Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef _LINUX_IF_XDP_H
+#define _LINUX_IF_XDP_H
+
+#include <linux/types.h>
+
+/* Options for the sxdp_flags field */
+#define XDP_SHARED_UMEM (1 << 0)
+#define XDP_COPY (1 << 1) /* Force copy-mode */
+#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
+
+struct sockaddr_xdp {
+ __u16 sxdp_family;
+ __u16 sxdp_flags;
+ __u32 sxdp_ifindex;
+ __u32 sxdp_queue_id;
+ __u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+ __u64 producer;
+ __u64 consumer;
+ __u64 desc;
+};
+
+struct xdp_mmap_offsets {
+ struct xdp_ring_offset rx;
+ struct xdp_ring_offset tx;
+ struct xdp_ring_offset fr; /* Fill */
+ struct xdp_ring_offset cr; /* Completion */
+};
+
+/* XDP socket options */
+#define XDP_MMAP_OFFSETS 1
+#define XDP_RX_RING 2
+#define XDP_TX_RING 3
+#define XDP_UMEM_REG 4
+#define XDP_UMEM_FILL_RING 5
+#define XDP_UMEM_COMPLETION_RING 6
+#define XDP_STATISTICS 7
+
+struct xdp_umem_reg {
+ __u64 addr; /* Start of packet data area */
+ __u64 len; /* Length of packet data area */
+ __u32 chunk_size;
+ __u32 headroom;
+};
+
+struct xdp_statistics {
+ __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+};
+
+/* Pgoff for mmaping the rings */
+#define XDP_PGOFF_RX_RING 0
+#define XDP_PGOFF_TX_RING 0x80000000
+#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
+#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
+
+/* Rx/Tx descriptor */
+struct xdp_desc {
+ __u64 addr;
+ __u32 len;
+ __u32 options;
+};
+
+/* UMEM descriptor is __u64 */
+
+#endif /* _LINUX_IF_XDP_H */
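Usage sketch (not part of the patch): the header only defines the ABI; using it means creating an AF_XDP socket, registering a UMEM with XDP_UMEM_REG and binding a sockaddr_xdp to a queue on an interface. The AF_XDP/SOL_XDP values below are assumptions for older libc headers, and the fill/completion/RX/TX ring setup that a real program needs is deliberately left out.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP 44	/* assumption: not yet in older libc headers */
#endif
#ifndef SOL_XDP
#define SOL_XDP 283	/* assumption, see above */
#endif

int xsk_open(const char *ifname, __u32 queue_id)
{
	struct xdp_umem_reg umem = { .chunk_size = 2048, .headroom = 0 };
	struct sockaddr_xdp sxdp = { .sxdp_family = AF_XDP };
	void *area;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);
	if (fd < 0)
		return -1;

	/* Back the socket with 64 chunks of packet memory. */
	umem.len = 64 * umem.chunk_size;
	if (posix_memalign(&area, getpagesize(), umem.len))
		return -1;
	umem.addr = (unsigned long)area;
	if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &umem, sizeof(umem)) < 0)
		return -1;

	/*
	 * A real program must also create at least one RX or TX ring
	 * (XDP_RX_RING/XDP_TX_RING plus the fill/completion rings) and
	 * mmap() them before bind() will succeed; omitted here.
	 */
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_COPY;
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		return -1;
	return fd;
}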
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index a55cb8b10165..e7ad9d350a28 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -292,10 +292,11 @@ struct sockaddr_in {
#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
/* Defines for Multicast INADDR */
-#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
-#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
-#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
-#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
+#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
+#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
+#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
+#define INADDR_ALLSNOOPERS_GROUP 0xe000006aU /* 224.0.0.106 */
+#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 6d4ea4b6c922..2fe12b40d503 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -986,8 +986,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
-#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 /* Obsolete */
#define KVM_CAP_HYPERV_CPUID 167
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168
+#define KVM_CAP_PPC_IRQ_XIVE 169
+#define KVM_CAP_ARM_SVE 170
+#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
+#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1145,6 +1150,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_U256 0x0050000000000000ULL
#define KVM_REG_SIZE_U512 0x0060000000000000ULL
#define KVM_REG_SIZE_U1024 0x0070000000000000ULL
+#define KVM_REG_SIZE_U2048 0x0080000000000000ULL
struct kvm_reg_list {
__u64 n; /* number of regs */
@@ -1211,6 +1217,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3
KVM_DEV_TYPE_ARM_VGIC_ITS,
#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
+ KVM_DEV_TYPE_XIVE,
+#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_MAX,
};
@@ -1434,12 +1442,15 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
-/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
/* Available with KVM_CAP_HYPERV_CPUID */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+/* Available with KVM_CAP_ARM_SVE */
+#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h
index f189931042a7..45fcbf99d72e 100644
--- a/tools/include/uapi/linux/lirc.h
+++ b/tools/include/uapi/linux/lirc.h
@@ -134,6 +134,12 @@
#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
/*
+ * Return the recording timeout, which is either set by
+ * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
+ */
+#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
+
+/*
* struct lirc_scancode - decoded scancode with protocol for use with
* LIRC_MODE_SCANCODE
*
@@ -186,6 +192,9 @@ struct lirc_scancode {
* @RC_PROTO_XMP: XMP protocol
* @RC_PROTO_CEC: CEC protocol
* @RC_PROTO_IMON: iMon Pad protocol
+ * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
+ * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
+ * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -212,6 +221,9 @@ enum rc_proto {
RC_PROTO_XMP = 21,
RC_PROTO_CEC = 22,
RC_PROTO_IMON = 23,
+ RC_PROTO_RCMM12 = 24,
+ RC_PROTO_RCMM24 = 25,
+ RC_PROTO_RCMM32 = 26,
};
#endif
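Usage sketch (not part of the patch): the new ioctl reads back the receive timeout in microseconds like the other LIRC_GET_* requests; the device path is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/lirc.h>

int main(void)
{
	__u32 timeout = 0;
	int fd = open("/dev/lirc0", O_RDONLY); /* assumed device node */

	if (fd < 0 || ioctl(fd, LIRC_GET_REC_TIMEOUT, &timeout) < 0) {
		perror("LIRC_GET_REC_TIMEOUT");
		return 1;
	}
	printf("receive timeout: %u us\n", timeout);
	return 0;
}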
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
#define OVERCOMMIT_ALWAYS 1
#define OVERCOMMIT_NEVER 2
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+
/*
* Huge page size encoding when MAP_HUGETLB is specified, and a huge page
* size other than the default is desired. See hugetlb_encode.h.
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index 3f9ec42510b0..96a0240f23fe 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -55,4 +55,66 @@
#define MS_MGC_VAL 0xC0ED0000
#define MS_MGC_MSK 0xffff0000
+/*
+ * open_tree() flags.
+ */
+#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
+
+/*
+ * move_mount() flags.
+ */
+#define MOVE_MOUNT_F_SYMLINKS 0x00000001 /* Follow symlinks on from path */
+#define MOVE_MOUNT_F_AUTOMOUNTS 0x00000002 /* Follow automounts on from path */
+#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */
+#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
+#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
+#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
+#define MOVE_MOUNT__MASK 0x00000077
+
+/*
+ * fsopen() flags.
+ */
+#define FSOPEN_CLOEXEC 0x00000001
+
+/*
+ * fspick() flags.
+ */
+#define FSPICK_CLOEXEC 0x00000001
+#define FSPICK_SYMLINK_NOFOLLOW 0x00000002
+#define FSPICK_NO_AUTOMOUNT 0x00000004
+#define FSPICK_EMPTY_PATH 0x00000008
+
+/*
+ * The type of fsconfig() call made.
+ */
+enum fsconfig_command {
+ FSCONFIG_SET_FLAG = 0, /* Set parameter, supplying no value */
+ FSCONFIG_SET_STRING = 1, /* Set parameter, supplying a string value */
+ FSCONFIG_SET_BINARY = 2, /* Set parameter, supplying a binary blob value */
+ FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
+ FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
+ FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
+ FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+};
+
+/*
+ * fsmount() flags.
+ */
+#define FSMOUNT_CLOEXEC 0x00000001
+
+/*
+ * Mount attributes.
+ */
+#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */
+#define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */
+#define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */
+#define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */
+#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */
+#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */
+#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
+#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
+#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+
#endif /* _UAPI_LINUX_MOUNT_H */
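Usage sketch (not part of the patch): these constants back the fsopen()/fsconfig()/fsmount()/move_mount() system call family. Since libc wrappers may not exist yet, the sketch goes through syscall(); the x86_64 syscall numbers and the presence of these defines in <linux/mount.h> are assumptions to be checked against your installed headers.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

/* Assumed x86_64 numbers; prefer the values from <sys/syscall.h> if present. */
#ifndef __NR_fsopen
#define __NR_fsopen	430
#define __NR_fsconfig	431
#define __NR_fsmount	432
#define __NR_move_mount	429
#endif

int main(void)
{
	int fsfd, mfd;

	fsfd = syscall(__NR_fsopen, "tmpfs", FSOPEN_CLOEXEC);
	if (fsfd < 0)
		return 1;
	/* Set filesystem parameters, then create the superblock. */
	syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1M", 0);
	syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mfd = syscall(__NR_fsmount, fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
	if (mfd < 0)
		return 1;
	/* Attach the new mount at /mnt. */
	if (syscall(__NR_move_mount, mfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("move_mount");
		return 1;
	}
	return 0;
}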
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 9de8780ac8d9..7198ddd0c6b1 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -372,7 +372,9 @@ struct perf_event_attr {
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
namespaces : 1, /* include namespaces data */
- __reserved_1 : 35;
+ ksymbol : 1, /* include ksymbol events */
+ bpf_event : 1, /* include bpf events */
+ __reserved_1 : 33;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -445,8 +447,6 @@ struct perf_event_query_bpf {
__u32 ids[0];
};
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
/*
* Ioctls that can be done on a perf event fd:
*/
@@ -965,9 +965,58 @@ enum perf_event_type {
*/
PERF_RECORD_NAMESPACES = 16,
+ /*
+ * Record ksymbol register/unregister events:
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u32 len;
+ * u16 ksym_type;
+ * u16 flags;
+ * char name[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_KSYMBOL = 17,
+
+ /*
+ * Record bpf events:
+ * enum perf_bpf_event_type {
+ * PERF_BPF_EVENT_UNKNOWN = 0,
+ * PERF_BPF_EVENT_PROG_LOAD = 1,
+ * PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ * };
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u16 type;
+ * u16 flags;
+ * u32 id;
+ * u8 tag[BPF_TAG_SIZE];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_BPF_EVENT = 18,
+
PERF_RECORD_MAX, /* non-ABI */
};
+enum perf_record_ksymbol_type {
+ PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
+ PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
+};
+
+#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)
+
+enum perf_bpf_event_type {
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX, /* non-ABI */
+};
+
#define PERF_MAX_STACK_DEPTH 127
#define PERF_MAX_CONTEXTS_PER_STACK 8
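Usage sketch (not part of the patch): the two new attribute bits are opted into like any other perf_event_attr field before perf_event_open(). A minimal sketch of a software dummy event that would receive PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT records; opening across all processes typically needs root, and the mmap()ed ring buffer that actually delivers the records is not shown.

#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.ksymbol = 1;	/* ask for PERF_RECORD_KSYMBOL */
	attr.bpf_event = 1;	/* ask for PERF_RECORD_BPF_EVENT */

	/* No glibc wrapper exists for perf_event_open. */
	fd = syscall(__NR_perf_event_open, &attr, -1 /* pid: all */,
		     0 /* cpu */, -1 /* group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* Records arrive via the usual mmap()ed ring buffer (not shown). */
	close(fd);
	return 0;
}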
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index b4875a93363a..094bb03b9cc2 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -219,6 +219,7 @@ struct prctl_mm_map {
# define PR_SPEC_ENABLE (1UL << 1)
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+# define PR_SPEC_DISABLE_NOEXEC (1UL << 4)
/* Reset arm64 pointer authentication keys */
#define PR_PAC_RESET_KEYS 54
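Usage sketch (not part of the patch): PR_SPEC_DISABLE_NOEXEC is passed to PR_SET_SPECULATION_CTRL like the other PR_SPEC_* values. A hedged one-liner, assuming installed headers that already define the new value and a kernel that supports the speculative store bypass control:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Disable speculative store bypass for this task; cleared on execve(). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0) < 0) {
		perror("PR_SET_SPECULATION_CTRL");
		return 1;
	}
	return 0;
}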
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index 22627f80063e..ed4ee170bee2 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -10,6 +10,7 @@
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
+#define CLONE_PIDFD 0x00001000 /* set if a pidfd should be placed in parent */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
index 6e89a5df49a4..653c4f94f76e 100644
--- a/tools/include/uapi/linux/tc_act/tc_bpf.h
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -13,8 +13,6 @@
#include <linux/pkt_cls.h>
-#define TCA_ACT_BPF 13
-
struct tc_act_bpf {
tc_gen;
};
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 404d4b9ffe76..df1153cea0b7 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -32,6 +32,7 @@
#ifndef __KERNEL__
#include <stdlib.h>
+#include <time.h>
#endif
/*
diff --git a/tools/io_uring/Makefile b/tools/io_uring/Makefile
new file mode 100644
index 000000000000..00f146c54c53
--- /dev/null
+++ b/tools/io_uring/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for io_uring test tools
+CFLAGS += -Wall -Wextra -g -D_GNU_SOURCE
+LDLIBS += -lpthread
+
+all: io_uring-cp io_uring-bench
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $^
+
+io_uring-bench: syscall.o io_uring-bench.o
+ $(CC) $(CFLAGS) -o $@ $^ $(LDLIBS)
+
+io_uring-cp: setup.o syscall.o queue.o
+
+clean:
+ $(RM) io_uring-cp io_uring-bench *.o
+
+.PHONY: all clean
diff --git a/tools/io_uring/README b/tools/io_uring/README
new file mode 100644
index 000000000000..67fd70115cff
--- /dev/null
+++ b/tools/io_uring/README
@@ -0,0 +1,29 @@
+This directory includes a few programs that demonstrate how to use io_uring
+in an application. The examples are:
+
+io_uring-cp
+ A very basic io_uring implementation of cp(1). It takes two
+ arguments and copies the first argument to the second. This example
+ is part of liburing, and hence uses the simplified liburing API
+ for setting up an io_uring instance, submitting IO, completing IO,
+ etc. The support functions in queue.c and setup.c are straight
+ out of liburing.
+
+io_uring-bench
+ Benchmark program that does random reads on a number of files. This
+ app demonstrates the various features of io_uring, like fixed files,
+ fixed buffers, and polled IO. There are options in the program to
+ control which features to use. The arguments are the file (or files)
+ that io_uring-bench should operate on. This uses the raw io_uring
+ interface.
+
+liburing can be cloned with git here:
+
+ git://git.kernel.dk/liburing
+
+and contains a number of unit tests as well for testing io_uring. It also
+comes with man pages for the three system calls.
+
+Fio includes an io_uring engine; you can clone fio here:
+
+ git://git.kernel.dk/fio
diff --git a/tools/io_uring/barrier.h b/tools/io_uring/barrier.h
new file mode 100644
index 000000000000..ef00f6722ba9
--- /dev/null
+++ b/tools/io_uring/barrier.h
@@ -0,0 +1,16 @@
+#ifndef LIBURING_BARRIER_H
+#define LIBURING_BARRIER_H
+
+#if defined(__x86_64) || defined(__i386__)
+#define read_barrier() __asm__ __volatile__("":::"memory")
+#define write_barrier() __asm__ __volatile__("":::"memory")
+#else
+/*
+ * Add arch appropriate definitions. Be safe and use full barriers for
+ * archs we don't have support for.
+ */
+#define read_barrier() __sync_synchronize()
+#define write_barrier() __sync_synchronize()
+#endif
+
+#endif
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
new file mode 100644
index 000000000000..0f257139b003
--- /dev/null
+++ b/tools/io_uring/io_uring-bench.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple benchmark program that uses the various features of io_uring
+ * to provide fast random access to a device/file. It has various
+ * options that control how we use io_uring; see the OPTIONS section
+ * below. This uses the raw io_uring interface.
+ *
+ * Copyright (C) 2018-2019 Jens Axboe
+ */
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/uio.h>
+#include <linux/fs.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+#include <sched.h>
+
+#include "liburing.h"
+#include "barrier.h"
+
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+struct io_sq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ unsigned *flags;
+ unsigned *array;
+};
+
+struct io_cq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ struct io_uring_cqe *cqes;
+};
+
+#define DEPTH 128
+
+#define BATCH_SUBMIT 32
+#define BATCH_COMPLETE 32
+
+#define BS 4096
+
+#define MAX_FDS 16
+
+static unsigned sq_ring_mask, cq_ring_mask;
+
+struct file {
+ unsigned long max_blocks;
+ unsigned pending_ios;
+ int real_fd;
+ int fixed_fd;
+};
+
+struct submitter {
+ pthread_t thread;
+ int ring_fd;
+ struct drand48_data rand;
+ struct io_sq_ring sq_ring;
+ struct io_uring_sqe *sqes;
+ struct iovec iovecs[DEPTH];
+ struct io_cq_ring cq_ring;
+ int inflight;
+ unsigned long reaps;
+ unsigned long done;
+ unsigned long calls;
+ volatile int finish;
+
+ __s32 *fds;
+
+ struct file files[MAX_FDS];
+ unsigned nr_files;
+ unsigned cur_file;
+};
+
+static struct submitter submitters[1];
+static volatile int finish;
+
+/*
+ * OPTIONS: Set these to test the various features of io_uring.
+ */
+static int polled = 1; /* use IO polling */
+static int fixedbufs = 1; /* use fixed user buffers */
+static int register_files = 1; /* use fixed files */
+static int buffered = 0; /* use buffered IO, not O_DIRECT */
+static int sq_thread_poll = 0; /* use kernel submission/poller thread */
+static int sq_thread_cpu = -1; /* pin above thread to this CPU */
+static int do_nop = 0; /* no-op SQ ring commands */
+
+static int io_uring_register_buffers(struct submitter *s)
+{
+ if (do_nop)
+ return 0;
+
+ return io_uring_register(s->ring_fd, IORING_REGISTER_BUFFERS, s->iovecs,
+ DEPTH);
+}
+
+static int io_uring_register_files(struct submitter *s)
+{
+ unsigned i;
+
+ if (do_nop)
+ return 0;
+
+ s->fds = calloc(s->nr_files, sizeof(__s32));
+ for (i = 0; i < s->nr_files; i++) {
+ s->fds[i] = s->files[i].real_fd;
+ s->files[i].fixed_fd = i;
+ }
+
+ return io_uring_register(s->ring_fd, IORING_REGISTER_FILES, s->fds,
+ s->nr_files);
+}
+
+static int gettid(void)
+{
+ return syscall(__NR_gettid);
+}
+
+static unsigned file_depth(struct submitter *s)
+{
+ return (DEPTH + s->nr_files - 1) / s->nr_files;
+}
+
+static void init_io(struct submitter *s, unsigned index)
+{
+ struct io_uring_sqe *sqe = &s->sqes[index];
+ unsigned long offset;
+ struct file *f;
+ long r;
+
+ if (do_nop) {
+ sqe->opcode = IORING_OP_NOP;
+ return;
+ }
+
+ if (s->nr_files == 1) {
+ f = &s->files[0];
+ } else {
+ f = &s->files[s->cur_file];
+ if (f->pending_ios >= file_depth(s)) {
+ s->cur_file++;
+ if (s->cur_file == s->nr_files)
+ s->cur_file = 0;
+ f = &s->files[s->cur_file];
+ }
+ }
+ f->pending_ios++;
+
+ lrand48_r(&s->rand, &r);
+ offset = (r % (f->max_blocks - 1)) * BS;
+
+ if (register_files) {
+ sqe->flags = IOSQE_FIXED_FILE;
+ sqe->fd = f->fixed_fd;
+ } else {
+ sqe->flags = 0;
+ sqe->fd = f->real_fd;
+ }
+ if (fixedbufs) {
+ sqe->opcode = IORING_OP_READ_FIXED;
+ sqe->addr = (unsigned long) s->iovecs[index].iov_base;
+ sqe->len = BS;
+ sqe->buf_index = index;
+ } else {
+ sqe->opcode = IORING_OP_READV;
+ sqe->addr = (unsigned long) &s->iovecs[index];
+ sqe->len = 1;
+ sqe->buf_index = 0;
+ }
+ sqe->ioprio = 0;
+ sqe->off = offset;
+ sqe->user_data = (unsigned long) f;
+}
+
+static int prep_more_ios(struct submitter *s, unsigned max_ios)
+{
+ struct io_sq_ring *ring = &s->sq_ring;
+ unsigned index, tail, next_tail, prepped = 0;
+
+ next_tail = tail = *ring->tail;
+ do {
+ next_tail++;
+ read_barrier();
+ if (next_tail == *ring->head)
+ break;
+
+ index = tail & sq_ring_mask;
+ init_io(s, index);
+ ring->array[index] = index;
+ prepped++;
+ tail = next_tail;
+ } while (prepped < max_ios);
+
+ if (*ring->tail != tail) {
+ /* order tail store with writes to sqes above */
+ write_barrier();
+ *ring->tail = tail;
+ write_barrier();
+ }
+ return prepped;
+}
+
+static int get_file_size(struct file *f)
+{
+ struct stat st;
+
+ if (fstat(f->real_fd, &st) < 0)
+ return -1;
+ if (S_ISBLK(st.st_mode)) {
+ unsigned long long bytes;
+
+ if (ioctl(f->real_fd, BLKGETSIZE64, &bytes) != 0)
+ return -1;
+
+ f->max_blocks = bytes / BS;
+ return 0;
+ } else if (S_ISREG(st.st_mode)) {
+ f->max_blocks = st.st_size / BS;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int reap_events(struct submitter *s)
+{
+ struct io_cq_ring *ring = &s->cq_ring;
+ struct io_uring_cqe *cqe;
+ unsigned head, reaped = 0;
+
+ head = *ring->head;
+ do {
+ struct file *f;
+
+ read_barrier();
+ if (head == *ring->tail)
+ break;
+ cqe = &ring->cqes[head & cq_ring_mask];
+ if (!do_nop) {
+ f = (struct file *) (uintptr_t) cqe->user_data;
+ f->pending_ios--;
+ if (cqe->res != BS) {
+ printf("io: unexpected ret=%d\n", cqe->res);
+ if (polled && cqe->res == -EOPNOTSUPP)
+ printf("Your filesystem doesn't support poll\n");
+ return -1;
+ }
+ }
+ reaped++;
+ head++;
+ } while (1);
+
+ s->inflight -= reaped;
+ *ring->head = head;
+ write_barrier();
+ return reaped;
+}
+
+static void *submitter_fn(void *data)
+{
+ struct submitter *s = data;
+ struct io_sq_ring *ring = &s->sq_ring;
+ int ret = 0, prepped;
+
+ printf("submitter=%d\n", gettid());
+
+ srand48_r(pthread_self(), &s->rand);
+
+ prepped = 0;
+ do {
+ int to_wait, to_submit, this_reap, to_prep;
+
+ if (!prepped && s->inflight < DEPTH) {
+ to_prep = min(DEPTH - s->inflight, BATCH_SUBMIT);
+ prepped = prep_more_ios(s, to_prep);
+ }
+ s->inflight += prepped;
+submit_more:
+ to_submit = prepped;
+submit:
+ if (to_submit && (s->inflight + to_submit <= DEPTH))
+ to_wait = 0;
+ else
+ to_wait = min(s->inflight + to_submit, BATCH_COMPLETE);
+
+ /*
+ * Only need to call io_uring_enter if we're not using SQ thread
+ * poll, or if IORING_SQ_NEED_WAKEUP is set.
+ */
+ if (!sq_thread_poll || (*ring->flags & IORING_SQ_NEED_WAKEUP)) {
+ unsigned flags = 0;
+
+ if (to_wait)
+ flags = IORING_ENTER_GETEVENTS;
+ if ((*ring->flags & IORING_SQ_NEED_WAKEUP))
+ flags |= IORING_ENTER_SQ_WAKEUP;
+ ret = io_uring_enter(s->ring_fd, to_submit, to_wait,
+ flags, NULL);
+ s->calls++;
+ }
+
+ /*
+ * For non SQ thread poll, we already got the events we needed
+ * through the io_uring_enter() above. For SQ thread poll, we
+ * need to loop here until we find enough events.
+ */
+ this_reap = 0;
+ do {
+ int r;
+ r = reap_events(s);
+ if (r == -1) {
+ s->finish = 1;
+ break;
+ } else if (r > 0)
+ this_reap += r;
+ } while (sq_thread_poll && this_reap < to_wait);
+ s->reaps += this_reap;
+
+ if (ret >= 0) {
+ if (!ret) {
+ to_submit = 0;
+ if (s->inflight)
+ goto submit;
+ continue;
+ } else if (ret < to_submit) {
+ int diff = to_submit - ret;
+
+ s->done += ret;
+ prepped -= diff;
+ goto submit_more;
+ }
+ s->done += ret;
+ prepped = 0;
+ continue;
+ } else if (ret < 0) {
+ if (errno == EAGAIN) {
+ if (s->finish)
+ break;
+ if (this_reap)
+ goto submit;
+ to_submit = 0;
+ goto submit;
+ }
+ printf("io_submit: %s\n", strerror(errno));
+ break;
+ }
+ } while (!s->finish);
+
+ finish = 1;
+ return NULL;
+}
+
+static void sig_int(int sig)
+{
+ printf("Exiting on signal %d\n", sig);
+ submitters[0].finish = 1;
+ finish = 1;
+}
+
+static void arm_sig_int(void)
+{
+ struct sigaction act;
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = sig_int;
+ act.sa_flags = SA_RESTART;
+ sigaction(SIGINT, &act, NULL);
+}
+
+static int setup_ring(struct submitter *s)
+{
+ struct io_sq_ring *sring = &s->sq_ring;
+ struct io_cq_ring *cring = &s->cq_ring;
+ struct io_uring_params p;
+ int ret, fd;
+ void *ptr;
+
+ memset(&p, 0, sizeof(p));
+
+ if (polled && !do_nop)
+ p.flags |= IORING_SETUP_IOPOLL;
+ if (sq_thread_poll) {
+ p.flags |= IORING_SETUP_SQPOLL;
+ if (sq_thread_cpu != -1) {
+ p.flags |= IORING_SETUP_SQ_AFF;
+ p.sq_thread_cpu = sq_thread_cpu;
+ }
+ }
+
+ fd = io_uring_setup(DEPTH, &p);
+ if (fd < 0) {
+ perror("io_uring_setup");
+ return 1;
+ }
+ s->ring_fd = fd;
+
+ if (fixedbufs) {
+ ret = io_uring_register_buffers(s);
+ if (ret < 0) {
+ perror("io_uring_register_buffers");
+ return 1;
+ }
+ }
+
+ if (register_files) {
+ ret = io_uring_register_files(s);
+ if (ret < 0) {
+ perror("io_uring_register_files");
+ return 1;
+ }
+ }
+
+ ptr = mmap(0, p.sq_off.array + p.sq_entries * sizeof(__u32),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQ_RING);
+ printf("sq_ring ptr = 0x%p\n", ptr);
+ sring->head = ptr + p.sq_off.head;
+ sring->tail = ptr + p.sq_off.tail;
+ sring->ring_mask = ptr + p.sq_off.ring_mask;
+ sring->ring_entries = ptr + p.sq_off.ring_entries;
+ sring->flags = ptr + p.sq_off.flags;
+ sring->array = ptr + p.sq_off.array;
+ sq_ring_mask = *sring->ring_mask;
+
+ s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQES);
+ printf("sqes ptr = 0x%p\n", s->sqes);
+
+ ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_CQ_RING);
+ printf("cq_ring ptr = 0x%p\n", ptr);
+ cring->head = ptr + p.cq_off.head;
+ cring->tail = ptr + p.cq_off.tail;
+ cring->ring_mask = ptr + p.cq_off.ring_mask;
+ cring->ring_entries = ptr + p.cq_off.ring_entries;
+ cring->cqes = ptr + p.cq_off.cqes;
+ cq_ring_mask = *cring->ring_mask;
+ return 0;
+}
+
+static void file_depths(char *buf)
+{
+ struct submitter *s = &submitters[0];
+ unsigned i;
+ char *p;
+
+ buf[0] = '\0';
+ p = buf;
+ for (i = 0; i < s->nr_files; i++) {
+ struct file *f = &s->files[i];
+
+ if (i + 1 == s->nr_files)
+ p += sprintf(p, "%d", f->pending_ios);
+ else
+ p += sprintf(p, "%d, ", f->pending_ios);
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ struct submitter *s = &submitters[0];
+ unsigned long done, calls, reap;
+ int err, i, flags, fd;
+ char *fdepths;
+ void *ret;
+
+ if (!do_nop && argc < 2) {
+ printf("%s: filename\n", argv[0]);
+ return 1;
+ }
+
+ flags = O_RDONLY | O_NOATIME;
+ if (!buffered)
+ flags |= O_DIRECT;
+
+ i = 1;
+ while (!do_nop && i < argc) {
+ struct file *f;
+
+ if (s->nr_files == MAX_FDS) {
+ printf("Max number of files (%d) reached\n", MAX_FDS);
+ break;
+ }
+ fd = open(argv[i], flags);
+ if (fd < 0) {
+ perror("open");
+ return 1;
+ }
+
+ f = &s->files[s->nr_files];
+ f->real_fd = fd;
+ if (get_file_size(f)) {
+ printf("failed getting size of device/file\n");
+ return 1;
+ }
+ if (f->max_blocks <= 1) {
+ printf("Zero file/device size?\n");
+ return 1;
+ }
+ f->max_blocks--;
+
+ printf("Added file %s\n", argv[i]);
+ s->nr_files++;
+ i++;
+ }
+
+ if (fixedbufs) {
+ struct rlimit rlim;
+
+ rlim.rlim_cur = RLIM_INFINITY;
+ rlim.rlim_max = RLIM_INFINITY;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0) {
+ perror("setrlimit");
+ return 1;
+ }
+ }
+
+ arm_sig_int();
+
+ for (i = 0; i < DEPTH; i++) {
+ void *buf;
+
+ if (posix_memalign(&buf, BS, BS)) {
+ printf("failed alloc\n");
+ return 1;
+ }
+ s->iovecs[i].iov_base = buf;
+ s->iovecs[i].iov_len = BS;
+ }
+
+ err = setup_ring(s);
+ if (err) {
+ printf("ring setup failed: %s, %d\n", strerror(errno), err);
+ return 1;
+ }
+ printf("polled=%d, fixedbufs=%d, buffered=%d", polled, fixedbufs, buffered);
+ printf(" QD=%d, sq_ring=%d, cq_ring=%d\n", DEPTH, *s->sq_ring.ring_entries, *s->cq_ring.ring_entries);
+
+ pthread_create(&s->thread, NULL, submitter_fn, s);
+
+ fdepths = malloc(8 * s->nr_files);
+ reap = calls = done = 0;
+ do {
+ unsigned long this_done = 0;
+ unsigned long this_reap = 0;
+ unsigned long this_call = 0;
+ unsigned long rpc = 0, ipc = 0;
+
+ sleep(1);
+ this_done += s->done;
+ this_call += s->calls;
+ this_reap += s->reaps;
+ if (this_call - calls) {
+ rpc = (this_done - done) / (this_call - calls);
+ ipc = (this_reap - reap) / (this_call - calls);
+ } else
+ rpc = ipc = -1;
+ file_depths(fdepths);
+ printf("IOPS=%lu, IOS/call=%ld/%ld, inflight=%u (%s)\n",
+ this_done - done, rpc, ipc, s->inflight,
+ fdepths);
+ done = this_done;
+ calls = this_call;
+ reap = this_reap;
+ } while (!finish);
+
+ pthread_join(s->thread, &ret);
+ close(s->ring_fd);
+ free(fdepths);
+ return 0;
+}
diff --git a/tools/io_uring/io_uring-cp.c b/tools/io_uring/io_uring-cp.c
new file mode 100644
index 000000000000..81461813ec62
--- /dev/null
+++ b/tools/io_uring/io_uring-cp.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple test program that demonstrates a file copy through io_uring. This
+ * uses the API exposed by liburing.
+ *
+ * Copyright (C) 2018-2019 Jens Axboe
+ */
+#include <stdio.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+
+#include "liburing.h"
+
+#define QD 64
+#define BS (32*1024)
+
+static int infd, outfd;
+
+struct io_data {
+ int read;
+ off_t first_offset, offset;
+ size_t first_len;
+ struct iovec iov;
+};
+
+static int setup_context(unsigned entries, struct io_uring *ring)
+{
+ int ret;
+
+ ret = io_uring_queue_init(entries, ring, 0);
+ if (ret < 0) {
+ fprintf(stderr, "queue_init: %s\n", strerror(-ret));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_file_size(int fd, off_t *size)
+{
+ struct stat st;
+
+ if (fstat(fd, &st) < 0)
+ return -1;
+ if (S_ISREG(st.st_mode)) {
+ *size = st.st_size;
+ return 0;
+ } else if (S_ISBLK(st.st_mode)) {
+ unsigned long long bytes;
+
+ if (ioctl(fd, BLKGETSIZE64, &bytes) != 0)
+ return -1;
+
+ *size = bytes;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void queue_prepped(struct io_uring *ring, struct io_data *data)
+{
+ struct io_uring_sqe *sqe;
+
+ sqe = io_uring_get_sqe(ring);
+ assert(sqe);
+
+ if (data->read)
+ io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
+ else
+ io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
+
+ io_uring_sqe_set_data(sqe, data);
+}
+
+static int queue_read(struct io_uring *ring, off_t size, off_t offset)
+{
+ struct io_uring_sqe *sqe;
+ struct io_data *data;
+
+ data = malloc(size + sizeof(*data));
+ if (!data)
+ return 1;
+
+ sqe = io_uring_get_sqe(ring);
+ if (!sqe) {
+ free(data);
+ return 1;
+ }
+
+ data->read = 1;
+ data->offset = data->first_offset = offset;
+
+ data->iov.iov_base = data + 1;
+ data->iov.iov_len = size;
+ data->first_len = size;
+
+ io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
+ io_uring_sqe_set_data(sqe, data);
+ return 0;
+}
+
+static void queue_write(struct io_uring *ring, struct io_data *data)
+{
+ data->read = 0;
+ data->offset = data->first_offset;
+
+ data->iov.iov_base = data + 1;
+ data->iov.iov_len = data->first_len;
+
+ queue_prepped(ring, data);
+ io_uring_submit(ring);
+}
+
+static int copy_file(struct io_uring *ring, off_t insize)
+{
+ unsigned long reads, writes;
+ struct io_uring_cqe *cqe;
+ off_t write_left, offset;
+ int ret;
+
+ write_left = insize;
+ writes = reads = offset = 0;
+
+ while (insize || write_left) {
+ unsigned long had_reads;
+ int got_comp;
+
+ /*
+ * Queue up as many reads as we can
+ */
+ had_reads = reads;
+ while (insize) {
+ off_t this_size = insize;
+
+ if (reads + writes >= QD)
+ break;
+ if (this_size > BS)
+ this_size = BS;
+ else if (!this_size)
+ break;
+
+ if (queue_read(ring, this_size, offset))
+ break;
+
+ insize -= this_size;
+ offset += this_size;
+ reads++;
+ }
+
+ if (had_reads != reads) {
+ ret = io_uring_submit(ring);
+ if (ret < 0) {
+ fprintf(stderr, "io_uring_submit: %s\n", strerror(-ret));
+ break;
+ }
+ }
+
+ /*
+ * Queue is full at this point. Find at least one completion.
+ */
+ got_comp = 0;
+ while (write_left) {
+ struct io_data *data;
+
+ if (!got_comp) {
+ ret = io_uring_wait_cqe(ring, &cqe);
+ got_comp = 1;
+ } else
+ ret = io_uring_peek_cqe(ring, &cqe);
+ if (ret < 0) {
+ fprintf(stderr, "io_uring_peek_cqe: %s\n",
+ strerror(-ret));
+ return 1;
+ }
+ if (!cqe)
+ break;
+
+ data = io_uring_cqe_get_data(cqe);
+ if (cqe->res < 0) {
+ if (cqe->res == -EAGAIN) {
+ queue_prepped(ring, data);
+ io_uring_cqe_seen(ring, cqe);
+ continue;
+ }
+ fprintf(stderr, "cqe failed: %s\n",
+ strerror(-cqe->res));
+ return 1;
+ } else if ((size_t) cqe->res != data->iov.iov_len) {
+ /* Short read/write, adjust and requeue */
+ data->iov.iov_base += cqe->res;
+ data->iov.iov_len -= cqe->res;
+ data->offset += cqe->res;
+ queue_prepped(ring, data);
+ io_uring_cqe_seen(ring, cqe);
+ continue;
+ }
+
+ /*
+ * All done. If write, nothing else to do. If read,
+ * queue up corresponding write.
+ */
+ if (data->read) {
+ queue_write(ring, data);
+ write_left -= data->first_len;
+ reads--;
+ writes++;
+ } else {
+ free(data);
+ writes--;
+ }
+ io_uring_cqe_seen(ring, cqe);
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct io_uring ring;
+ off_t insize;
+ int ret;
+
+ if (argc < 3) {
+ printf("%s: infile outfile\n", argv[0]);
+ return 1;
+ }
+
+ infd = open(argv[1], O_RDONLY);
+ if (infd < 0) {
+ perror("open infile");
+ return 1;
+ }
+ outfd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (outfd < 0) {
+ perror("open outfile");
+ return 1;
+ }
+
+ if (setup_context(QD, &ring))
+ return 1;
+ if (get_file_size(infd, &insize))
+ return 1;
+
+ ret = copy_file(&ring, insize);
+
+ close(infd);
+ close(outfd);
+ io_uring_queue_exit(&ring);
+ return ret;
+}
diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h
new file mode 100644
index 000000000000..5f305c86b892
--- /dev/null
+++ b/tools/io_uring/liburing.h
@@ -0,0 +1,183 @@
+#ifndef LIB_URING_H
+#define LIB_URING_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/uio.h>
+#include <signal.h>
+#include <string.h>
+#include "../../include/uapi/linux/io_uring.h"
+#include <inttypes.h>
+#include "barrier.h"
+
+/*
+ * Library interface to io_uring
+ */
+struct io_uring_sq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *kflags;
+ unsigned *kdropped;
+ unsigned *array;
+ struct io_uring_sqe *sqes;
+
+ unsigned sqe_head;
+ unsigned sqe_tail;
+
+ size_t ring_sz;
+};
+
+struct io_uring_cq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *koverflow;
+ struct io_uring_cqe *cqes;
+
+ size_t ring_sz;
+};
+
+struct io_uring {
+ struct io_uring_sq sq;
+ struct io_uring_cq cq;
+ int ring_fd;
+};
+
+/*
+ * System calls
+ */
+extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
+extern int io_uring_enter(int fd, unsigned to_submit,
+ unsigned min_complete, unsigned flags, sigset_t *sig);
+extern int io_uring_register(int fd, unsigned int opcode, void *arg,
+ unsigned int nr_args);
+
+/*
+ * Library interface
+ */
+extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+ unsigned flags);
+extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
+ struct io_uring *ring);
+extern void io_uring_queue_exit(struct io_uring *ring);
+extern int io_uring_peek_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr);
+extern int io_uring_wait_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr);
+extern int io_uring_submit(struct io_uring *ring);
+extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
+
+/*
+ * Must be called after io_uring_{peek,wait}_cqe(), once the cqe has
+ * been processed by the application.
+ */
+static inline void io_uring_cqe_seen(struct io_uring *ring,
+ struct io_uring_cqe *cqe)
+{
+ if (cqe) {
+ struct io_uring_cq *cq = &ring->cq;
+
+ (*cq->khead)++;
+ /*
+ * Ensure that the kernel sees our new head, the kernel has
+ * the matching read barrier.
+ */
+ write_barrier();
+ }
+}
+
+/*
+ * Command prep helpers
+ */
+static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
+{
+ sqe->user_data = (unsigned long) data;
+}
+
+static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
+{
+ return (void *) (uintptr_t) cqe->user_data;
+}
+
+static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
+ const void *addr, unsigned len,
+ off_t offset)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = op;
+ sqe->fd = fd;
+ sqe->off = offset;
+ sqe->addr = (unsigned long) addr;
+ sqe->len = len;
+}
+
+static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
+ const struct iovec *iovecs,
+ unsigned nr_vecs, off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
+ void *buf, unsigned nbytes,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
+}
+
+static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
+ const struct iovec *iovecs,
+ unsigned nr_vecs, off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
+}
+
+static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
+ const void *buf, unsigned nbytes,
+ off_t offset)
+{
+ io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
+}
+
+static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
+ short poll_mask)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_POLL_ADD;
+ sqe->fd = fd;
+ sqe->poll_events = poll_mask;
+}
+
+static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
+ void *user_data)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_POLL_REMOVE;
+ sqe->addr = (unsigned long) user_data;
+}
+
+static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
+ unsigned fsync_flags)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_FSYNC;
+ sqe->fd = fd;
+ sqe->fsync_flags = fsync_flags;
+}
+
+static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = IORING_OP_NOP;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/tools/io_uring/queue.c b/tools/io_uring/queue.c
new file mode 100644
index 000000000000..321819c132c7
--- /dev/null
+++ b/tools/io_uring/queue.c
@@ -0,0 +1,156 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "liburing.h"
+#include "barrier.h"
+
+static int __io_uring_get_cqe(struct io_uring *ring,
+ struct io_uring_cqe **cqe_ptr, int wait)
+{
+ struct io_uring_cq *cq = &ring->cq;
+ const unsigned mask = *cq->kring_mask;
+ unsigned head;
+ int ret;
+
+ *cqe_ptr = NULL;
+ head = *cq->khead;
+ do {
+ /*
+ * It's necessary to use a read_barrier() before reading
+ * the CQ tail, since the kernel updates it locklessly. The
+ * kernel has the matching store barrier for the update. The
+ * kernel also ensures that previous stores to CQEs are ordered
+ * with the tail update.
+ */
+ read_barrier();
+ if (head != *cq->ktail) {
+ *cqe_ptr = &cq->cqes[head & mask];
+ break;
+ }
+ if (!wait)
+ break;
+ ret = io_uring_enter(ring->ring_fd, 0, 1,
+ IORING_ENTER_GETEVENTS, NULL);
+ if (ret < 0)
+ return -errno;
+ } while (1);
+
+ return 0;
+}
+
+/*
+ * Return an IO completion, if one is readily available. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+int io_uring_peek_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
+{
+ return __io_uring_get_cqe(ring, cqe_ptr, 0);
+}
+
+/*
+ * Return an IO completion, waiting for it if necessary. Returns 0 with
+ * cqe_ptr filled in on success, -errno on failure.
+ */
+int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
+{
+ return __io_uring_get_cqe(ring, cqe_ptr, 1);
+}
+
+/*
+ * Submit sqes acquired from io_uring_get_sqe() to the kernel.
+ *
+ * Returns number of sqes submitted
+ */
+int io_uring_submit(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ const unsigned mask = *sq->kring_mask;
+ unsigned ktail, ktail_next, submitted, to_submit;
+ int ret;
+
+ /*
+ * If we have pending IO in the kring, submit it first. We need a
+ * read barrier here to match the kernel's store barrier when updating
+ * the SQ head.
+ */
+ read_barrier();
+ if (*sq->khead != *sq->ktail) {
+ submitted = *sq->kring_entries;
+ goto submit;
+ }
+
+ if (sq->sqe_head == sq->sqe_tail)
+ return 0;
+
+ /*
+ * Fill in sqes that we have queued up, adding them to the kernel ring
+ */
+ submitted = 0;
+ ktail = ktail_next = *sq->ktail;
+ to_submit = sq->sqe_tail - sq->sqe_head;
+ while (to_submit--) {
+ ktail_next++;
+ read_barrier();
+
+ sq->array[ktail & mask] = sq->sqe_head & mask;
+ ktail = ktail_next;
+
+ sq->sqe_head++;
+ submitted++;
+ }
+
+ if (!submitted)
+ return 0;
+
+ if (*sq->ktail != ktail) {
+ /*
+ * First write barrier ensures that the SQE stores are updated
+ * with the tail update. This is needed so that the kernel
+ * will never see a tail update without the preceding SQE
+ * stores being done.
+ */
+ write_barrier();
+ *sq->ktail = ktail;
+ /*
+ * The kernel has the matching read barrier for reading the
+ * SQ tail.
+ */
+ write_barrier();
+ }
+
+submit:
+ ret = io_uring_enter(ring->ring_fd, submitted, 0,
+ IORING_ENTER_GETEVENTS, NULL);
+ if (ret < 0)
+ return -errno;
+
+ return ret;
+}
+
+/*
+ * Return an sqe to fill. Application must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant sqe, or NULL if we're full.
+ */
+struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ unsigned next = sq->sqe_tail + 1;
+ struct io_uring_sqe *sqe;
+
+ /*
+ * All sqes are used
+ */
+ if (next - sq->sqe_head > *sq->kring_entries)
+ return NULL;
+
+ sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+ sq->sqe_tail = next;
+ return sqe;
+}
diff --git a/tools/io_uring/setup.c b/tools/io_uring/setup.c
new file mode 100644
index 000000000000..0b50fcd78520
--- /dev/null
+++ b/tools/io_uring/setup.c
@@ -0,0 +1,107 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+
+#include "liburing.h"
+
+static int io_uring_mmap(int fd, struct io_uring_params *p,
+ struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+ size_t size;
+ void *ptr;
+ int ret;
+
+ sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
+ ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+ if (ptr == MAP_FAILED)
+ return -errno;
+ sq->khead = ptr + p->sq_off.head;
+ sq->ktail = ptr + p->sq_off.tail;
+ sq->kring_mask = ptr + p->sq_off.ring_mask;
+ sq->kring_entries = ptr + p->sq_off.ring_entries;
+ sq->kflags = ptr + p->sq_off.flags;
+ sq->kdropped = ptr + p->sq_off.dropped;
+ sq->array = ptr + p->sq_off.array;
+
+ size = p->sq_entries * sizeof(struct io_uring_sqe);
+ sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd,
+ IORING_OFF_SQES);
+ if (sq->sqes == MAP_FAILED) {
+ ret = -errno;
+err:
+ munmap(sq->khead, sq->ring_sz);
+ return ret;
+ }
+
+ cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+ ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+ if (ptr == MAP_FAILED) {
+ ret = -errno;
+ munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+ goto err;
+ }
+ cq->khead = ptr + p->cq_off.head;
+ cq->ktail = ptr + p->cq_off.tail;
+ cq->kring_mask = ptr + p->cq_off.ring_mask;
+ cq->kring_entries = ptr + p->cq_off.ring_entries;
+ cq->koverflow = ptr + p->cq_off.overflow;
+ cq->cqes = ptr + p->cq_off.cqes;
+ return 0;
+}
+
+/*
+ * For users that want to specify sq_thread_cpu or sq_thread_idle, this
+ * interface is a convenient helper for mmap()ing the rings.
+ * Returns -1 on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
+{
+ int ret;
+
+ memset(ring, 0, sizeof(*ring));
+ ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
+ if (!ret)
+ ring->ring_fd = fd;
+ return ret;
+}
+
+/*
+ * Returns -1 on error, or zero on success. On success, 'ring'
+ * contains the necessary information to read/write to the rings.
+ */
+int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
+{
+ struct io_uring_params p;
+ int fd, ret;
+
+ memset(&p, 0, sizeof(p));
+ p.flags = flags;
+
+ fd = io_uring_setup(entries, &p);
+ if (fd < 0)
+ return fd;
+
+ ret = io_uring_queue_mmap(fd, &p, ring);
+ if (ret)
+ close(fd);
+
+ return ret;
+}
+
+void io_uring_queue_exit(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ struct io_uring_cq *cq = &ring->cq;
+
+ munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
+ munmap(sq->khead, sq->ring_sz);
+ munmap(cq->khead, cq->ring_sz);
+ close(ring->ring_fd);
+}
diff --git a/tools/io_uring/syscall.c b/tools/io_uring/syscall.c
new file mode 100644
index 000000000000..b22e0aa54e9d
--- /dev/null
+++ b/tools/io_uring/syscall.c
@@ -0,0 +1,52 @@
+/*
+ * Will go away once libc support is there
+ */
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <signal.h>
+#include "liburing.h"
+
+#ifdef __alpha__
+/*
+ * alpha is the only exception; all other architectures
+ * have common numbers for new system calls.
+ */
+# ifndef __NR_io_uring_setup
+# define __NR_io_uring_setup 535
+# endif
+# ifndef __NR_io_uring_enter
+# define __NR_io_uring_enter 536
+# endif
+# ifndef __NR_io_uring_register
+# define __NR_io_uring_register 537
+# endif
+#else /* !__alpha__ */
+# ifndef __NR_io_uring_setup
+# define __NR_io_uring_setup 425
+# endif
+# ifndef __NR_io_uring_enter
+# define __NR_io_uring_enter 426
+# endif
+# ifndef __NR_io_uring_register
+# define __NR_io_uring_register 427
+# endif
+#endif
+
+int io_uring_register(int fd, unsigned int opcode, void *arg,
+ unsigned int nr_args)
+{
+ return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
+}
+
+int io_uring_setup(unsigned int entries, struct io_uring_params *p)
+{
+ return syscall(__NR_io_uring_setup, entries, p);
+}
+
+int io_uring_enter(int fd, unsigned int to_submit, unsigned int min_complete,
+ unsigned int flags, sigset_t *sig)
+{
+ return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
+ flags, sig, _NSIG / 8);
+}
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 2ed395b817cb..ad1b9e646c49 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0-only
#
# top-like utility for displaying kvm statistics
#
@@ -8,8 +9,6 @@
# Authors:
# Avi Kivity <avi@redhat.com>
#
-# This work is licensed under the terms of the GNU GPL, version 2. See
-# the COPYING file in the top-level directory.
"""The kvm_stat module outputs statistics about running KVM VMs
Three different ways of output formatting are available:
@@ -575,8 +574,12 @@ class TracepointProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
self.fields = [field for field in self._get_available_fields()
- if self.is_field_wanted(fields_filter, field) or
- ARCH.tracepoint_is_child(field)]
+ if self.is_field_wanted(fields_filter, field)]
+ # add parents for child fields - otherwise we won't see any output!
+ for field in self._fields:
+ parent = ARCH.tracepoint_is_child(field)
+ if (parent and parent not in self._fields):
+ self.fields.append(parent)
@staticmethod
def _get_online_cpus():
@@ -735,8 +738,12 @@ class DebugfsProvider(Provider):
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
self._fields = [field for field in self._get_available_fields()
- if self.is_field_wanted(fields_filter, field) or
- ARCH.debugfs_is_child(field)]
+ if self.is_field_wanted(fields_filter, field)]
+ # add parents for child fields - otherwise we won't see any output!
+ for field in self._fields:
+ parent = ARCH.debugfs_is_child(field)
+ if (parent and parent not in self._fields):
+ self.fields.append(parent)
@property
def fields(self):
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index 0811d860fe75..c057ba52364e 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -34,6 +34,8 @@ INTERACTIVE COMMANDS
*c*:: clear filter
*f*:: filter by regular expression
+ :: *Note*: Child events pull in their parents, and parents' stats summarize
+ all child events, not just the filtered ones
*g*:: filter by guest name/PID
diff --git a/tools/laptop/freefall/freefall.c b/tools/laptop/freefall/freefall.c
index 5e44b20b1848..d29a86cda87f 100644
--- a/tools/laptop/freefall/freefall.c
+++ b/tools/laptop/freefall/freefall.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Disk protection for HP/DELL machines.
*
* Copyright 2008 Eric Piel
* Copyright 2009 Pavel Machek <pavel@ucw.cz>
* Copyright 2012 Sonal Santan
* Copyright 2014 Pali Rohár <pali.rohar@gmail.com>
- *
- * GPLv2.
*/
#include <stdio.h>
diff --git a/tools/lib/api/fd/array.c b/tools/lib/api/fd/array.c
index b0a035fc87b3..58d44d5eee31 100644
--- a/tools/lib/api/fd/array.c
+++ b/tools/lib/api/fd/array.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "array.h"
#include <errno.h>
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 38748b0e342f..38494782be06 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* From lib/bitmap.c
* Helper functions for bitmap.h.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
*/
#include <linux/bitmap.h>
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 4db74758c674..d9e9dec04605 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,3 +1,5 @@
libbpf_version.h
+libbpf.pc
FEATURE-DUMP.libbpf
test_libbpf
+libbpf.so.*
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 197b40f5b5c6..ee9d5362f35b 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 34d9c3619c96..f91639bf5650 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -3,7 +3,7 @@
BPF_VERSION = 0
BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 1
+BPF_EXTRAVERSION = 3
MAKEFLAGS += --no-print-directory
@@ -14,21 +14,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
#$(info Determined 'srctree' to be $(srctree))
endif
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
INSTALL = install
# Use DESTDIR for installing into a different root directory.
@@ -54,7 +39,7 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
-include ../../scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
# copy a bit from Linux kbuild
@@ -94,8 +79,6 @@ export prefix libdir src obj
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-LIB_FILE = libbpf.a libbpf.so
-
VERSION = $(BPF_VERSION)
PATCHLEVEL = $(BPF_PATCHLEVEL)
EXTRAVERSION = $(BPF_EXTRAVERSION)
@@ -103,7 +86,11 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
OBJ = $@
N =
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
+LIB_FILE = libbpf.a libbpf.so*
+PC_FILE = libbpf.pc
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
@@ -143,16 +130,19 @@ all:
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
-BPF_IN := $(OUTPUT)libbpf-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
-VERSION_SCRIPT := libbpf.map
+BPF_IN := $(OUTPUT)libbpf-in.o
+VERSION_SCRIPT := libbpf.map
+
+LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
+LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
-GLOBAL_SYM_COUNT = $(shell readelf -s $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
-VERSIONED_SYM_COUNT = $(shell readelf -s $(OUTPUT)libbpf.so | \
+VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
CXX_TEST_TARGET = $(OUTPUT)test_libbpf
@@ -162,7 +152,8 @@ endif
TARGETS = $(CMD_TARGETS)
-all: fixdep all_cmd
+all: fixdep
+ $(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check
@@ -179,17 +170,30 @@ $(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
(diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
+ @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
+ (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
+ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf
-$(OUTPUT)libbpf.so: $(BPF_IN)
- $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
- $^ -o $@
+$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
+ @ln -sf $(@F) $(OUTPUT)libbpf.so
+ @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
- $(QUIET_LINK)$(CXX) $^ -lelf -o $@
+ $(QUIET_LINK)$(CXX) $(INCLUDES) $^ -lelf -o $@
+
+$(OUTPUT)libbpf.pc:
+ $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+ -e "s|@LIBDIR@|$(libdir_SQ)|" \
+ -e "s|@VERSION@|$(LIBBPF_VERSION)|" \
+ < libbpf.pc.template > $@
check: check_abi
@@ -203,6 +207,12 @@ check_abi: $(OUTPUT)libbpf.so
exit 1; \
fi
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -211,16 +221,23 @@ define do_install
endef
install_lib: all_cmd
- $(call QUIET_INSTALL, $(LIB_FILE)) \
- $(call do_install,$(LIB_FILE),$(libdir_SQ))
+ $(call QUIET_INSTALL, $(LIB_TARGET)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
install_headers:
$(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf.h,$(prefix)/include/bpf,644);
- $(call do_install,btf.h,$(prefix)/include/bpf,644);
+ $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,btf.h,$(prefix)/include/bpf,644); \
+ $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
+ $(call do_install,xsk.h,$(prefix)/include/bpf,644);
+
+install_pkgconfig: $(PC_FILE)
+ $(call QUIET_INSTALL, $(PC_FILE)) \
+ $(call do_install,$(PC_FILE),$(libdir_SQ)/pkgconfig,644)
-install: install_lib
+install: install_lib install_pkgconfig
### Cleaning rules
@@ -230,7 +247,7 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
- *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
+ *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd *.pc LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 607aae40f4ed..cef7b77eab69 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -9,7 +9,7 @@ described here. It's recommended to follow these conventions whenever a
new function or type is added to keep libbpf API clean and consistent.
All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``.
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``.
System call wrappers
--------------------
@@ -62,6 +62,19 @@ Auxiliary functions and types that don't fit well in any of categories
described above should have ``libbpf_`` prefix, e.g.
``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
+AF_XDP functions
+-------------------
+
+AF_XDP functions should have an ``xsk_`` prefix, e.g.
+``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
+of both low-level ring access functions and high-level configuration
+functions. These can be mixed and matched. Note that these functions
+are not reentrant for performance reasons.
+
+Please take a look at Documentation/networking/af_xdp.rst in the Linux
+kernel source tree on how to use XDP sockets and for some common
+mistakes in case you do not get any traffic up to user space.
+
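A minimal sketch of the high-level configuration flow, assuming the xsk_umem__create() and xsk_socket__create() entry points added by this series; the exact signatures and config structs live in tools/lib/bpf/xsk.h and should be checked there. The interface name, queue id and buffer sizing below are placeholders, and error handling is abbreviated:

#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>

/* Sketch: reserve page-aligned memory for the UMEM, then bind an AF_XDP
 * socket to queue 0 of "eth0". NULL configs select the library defaults. */
static int xsk_example(void)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	size_t size = 2048 * 4096;	/* 2048 frames of 4 KiB, an assumption */
	void *bufs;
	int err;

	if (posix_memalign(&bufs, getpagesize(), size))
		return -1;

	err = xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
	if (err)
		return err;

	return xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
}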
libbpf ABI
==========
@@ -98,6 +111,7 @@ starting from ``0.0.1``.
Every time ABI is being changed, e.g. because a new symbol is added or
semantic of existing symbol is changed, ABI version should be bumped.
+The ABI version is bumped at most once per kernel development cycle.
For example, if current state of ``libbpf.map`` is:
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 88cbd110ae58..c4a48086dc9a 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -22,6 +22,7 @@
*/
#include <stdlib.h>
+#include <string.h>
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
@@ -45,6 +46,8 @@
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
+# elif defined(__arc__)
+# define __NR_bpf 280
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
@@ -78,7 +81,6 @@ static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
- __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
union bpf_attr attr;
memset(&attr, '\0', sizeof(attr));
@@ -88,8 +90,9 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
attr.value_size = create_attr->value_size;
attr.max_entries = create_attr->max_entries;
attr.map_flags = create_attr->map_flags;
- memcpy(attr.map_name, create_attr->name,
- min(name_len, BPF_OBJ_NAME_LEN - 1));
+ if (create_attr->name)
+ memcpy(attr.map_name, create_attr->name,
+ min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
attr.numa_node = create_attr->numa_node;
attr.btf_fd = create_attr->btf_fd;
attr.btf_key_type_id = create_attr->btf_key_type_id;
@@ -154,7 +157,6 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
int key_size, int inner_map_fd, int max_entries,
__u32 map_flags, int node)
{
- __u32 name_len = name ? strlen(name) : 0;
union bpf_attr attr;
memset(&attr, '\0', sizeof(attr));
@@ -165,7 +167,9 @@ int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
attr.inner_map_fd = inner_map_fd;
attr.max_entries = max_entries;
attr.map_flags = map_flags;
- memcpy(attr.map_name, name, min(name_len, BPF_OBJ_NAME_LEN - 1));
+ if (name)
+ memcpy(attr.map_name, name,
+ min(strlen(name), BPF_OBJ_NAME_LEN - 1));
if (node >= 0) {
attr.map_flags |= BPF_F_NUMA_NODE;
@@ -214,23 +218,32 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
{
void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
- __u32 name_len;
+ __u32 log_level;
int fd;
- if (!load_attr)
+ if (!load_attr || !log_buf != !log_buf_sz)
return -EINVAL;
- name_len = load_attr->name ? strlen(load_attr->name) : 0;
+ log_level = load_attr->log_level;
+ if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
+ return -EINVAL;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.insn_cnt = (__u32)load_attr->insns_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
- attr.log_level = 0;
+
+ attr.log_level = log_level;
+ if (log_level) {
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_buf_sz;
+ } else {
+ attr.log_buf = ptr_to_u64(NULL);
+ attr.log_size = 0;
+ }
+
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
@@ -240,8 +253,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info);
- memcpy(attr.prog_name, load_attr->name,
- min(name_len, BPF_OBJ_NAME_LEN - 1));
+ if (load_attr->name)
+ memcpy(attr.prog_name, load_attr->name,
+ min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf_prog_load(&attr, sizeof(attr));
if (fd >= 0)
@@ -286,7 +300,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
goto done;
}
- if (!log_buf || !log_buf_sz)
+ if (log_level || !log_buf)
goto done;
/* Try again with log */
@@ -327,7 +341,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_type = type;
attr.insn_cnt = (__u32)insns_cnt;
attr.insns = ptr_to_u64(insns);
@@ -347,7 +361,7 @@ int bpf_map_update_elem(int fd, const void *key, const void *value,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -360,10 +374,23 @@ int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+
+ return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+}
+
+int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
+{
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
+ attr.flags = flags;
return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}
@@ -372,7 +399,7 @@ int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
@@ -384,7 +411,7 @@ int bpf_map_delete_elem(int fd, const void *key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
@@ -395,7 +422,7 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
@@ -403,11 +430,21 @@ int bpf_map_get_next_key(int fd, const void *key, void *next_key)
return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}
+int bpf_map_freeze(int fd)
+{
+ union bpf_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+
+ return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
+}
+
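A hedged sketch of the intended use: populate a map from the loader, then freeze it so it can no longer be written through the syscall interface (map_fd and the key/value layout are placeholders):

#include <bpf/bpf.h>

/* Fill one entry, then forbid further writes from user space;
 * BPF programs keep reading the frozen contents. */
static int setup_config_map(int map_fd)
{
	__u32 key = 0, value = 42;
	int err;

	err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
	if (err)
		return err;

	return bpf_map_freeze(map_fd);
}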
int bpf_obj_pin(int fd, const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
@@ -418,7 +455,7 @@ int bpf_obj_get(const char *pathname)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
@@ -429,7 +466,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -442,7 +479,7 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_type = type;
@@ -453,7 +490,7 @@ int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
@@ -467,7 +504,7 @@ int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
attr.query.query_flags = query_flags;
@@ -488,7 +525,7 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
union bpf_attr attr;
int ret;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
attr.test.data_in = ptr_to_u64(data);
attr.test.data_out = ptr_to_u64(data_out);
@@ -513,16 +550,21 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
if (!test_attr->data_out && test_attr->data_size_out > 0)
return -EINVAL;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = test_attr->prog_fd;
attr.test.data_in = ptr_to_u64(test_attr->data_in);
attr.test.data_out = ptr_to_u64(test_attr->data_out);
attr.test.data_size_in = test_attr->data_size_in;
attr.test.data_size_out = test_attr->data_size_out;
+ attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
+ attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
+ attr.test.ctx_size_in = test_attr->ctx_size_in;
+ attr.test.ctx_size_out = test_attr->ctx_size_out;
attr.test.repeat = test_attr->repeat;
ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
test_attr->data_size_out = attr.test.data_size_out;
+ test_attr->ctx_size_out = attr.test.ctx_size_out;
test_attr->retval = attr.test.retval;
test_attr->duration = attr.test.duration;
return ret;
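A hedged sketch of driving BPF_PROG_TEST_RUN with the new context fields, here for a program type that takes a struct __sk_buff context; prog_fd and the packet buffer are placeholders:

#include <bpf/bpf.h>

static int run_with_ctx(int prog_fd, void *pkt, __u32 pkt_len)
{
	struct __sk_buff ctx_in = {}, ctx_out = {};
	struct bpf_prog_test_run_attr attr = {};
	int err;

	ctx_in.cb[0] = 1;

	attr.prog_fd = prog_fd;
	attr.repeat = 1;
	attr.data_in = pkt;
	attr.data_size_in = pkt_len;
	/* New in this patch: pass a context in and get the modified one back */
	attr.ctx_in = &ctx_in;
	attr.ctx_size_in = sizeof(ctx_in);
	attr.ctx_out = &ctx_out;
	attr.ctx_size_out = sizeof(ctx_out);

	err = bpf_prog_test_run_xattr(&attr);
	if (err)
		return err;
	return attr.retval;
}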
@@ -533,7 +575,7 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
@@ -548,7 +590,7 @@ int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
@@ -562,7 +604,7 @@ int bpf_prog_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.prog_id = id;
return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -572,7 +614,7 @@ int bpf_map_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.map_id = id;
return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -582,7 +624,7 @@ int bpf_btf_get_fd_by_id(__u32 id)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.btf_id = id;
return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
@@ -593,7 +635,7 @@ int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
union bpf_attr attr;
int err;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.info.bpf_fd = prog_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);
@@ -609,7 +651,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
union bpf_attr attr;
- bzero(&attr, sizeof(attr));
+ memset(&attr, 0, sizeof(attr));
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 8f09de482839..9593fec75652 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -26,6 +26,7 @@
#include <linux/bpf.h>
#include <stdbool.h>
#include <stddef.h>
+#include <stdint.h>
#ifdef __cplusplus
extern "C" {
@@ -85,13 +86,14 @@ struct bpf_load_program_attr {
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
+ __u32 log_level;
};
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01
/* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE (256 * 1024)
+#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
LIBBPF_API int
bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);
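A hedged sketch of the new knob: request verbose verifier output up front by setting log_level and providing a buffer, instead of relying on the retry-with-log fallback. The program type, instructions and buffer size below are placeholders:

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static int load_verbose(const struct bpf_insn *insns, size_t insn_cnt)
{
	/* deliberately smaller than BPF_LOG_BUF_SIZE to keep the sketch light */
	static char log_buf[1 << 20];
	struct bpf_load_program_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = insns;
	attr.insns_cnt = insn_cnt;
	attr.license = "GPL";
	attr.log_level = 1 | 2;	/* verifier states plus instruction dump */

	fd = bpf_load_program_xattr(&attr, log_buf, sizeof(log_buf));
	if (fd < 0)
		fprintf(stderr, "%s\n", log_buf);
	return fd;
}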
@@ -110,10 +112,13 @@ LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
LIBBPF_API int bpf_map_lookup_elem(int fd, const void *key, void *value);
+LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
+ __u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
void *value);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
+LIBBPF_API int bpf_map_freeze(int fd);
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
@@ -132,6 +137,11 @@ struct bpf_prog_test_run_attr {
* out: length of data_out */
__u32 retval; /* out: return code of the BPF program */
__u32 duration; /* out: average per repetition in ns */
+ const void *ctx_in; /* optional */
+ __u32 ctx_size_in;
+ void *ctx_out; /* optional */
+ __u32 ctx_size_out; /* in: max length of ctx_out
+				 * out: length of ctx_out */
};
LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d682d3b8f7b9..03348c4d6bd4 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -9,18 +10,22 @@
#include <linux/btf.h>
#include "btf.h"
#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
-#define elog(fmt, ...) { if (err_log) err_log(fmt, ##__VA_ARGS__); }
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
-#define BTF_MAX_NR_TYPES 65535
+#define BTF_MAX_NR_TYPES 0x7fffffff
+#define BTF_MAX_STR_OFFSET 0x7fffffff
#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
((k) == BTF_KIND_VOLATILE) || \
((k) == BTF_KIND_CONST) || \
((k) == BTF_KIND_RESTRICT))
+#define IS_VAR(k) ((k) == BTF_KIND_VAR)
+
static struct btf_type btf_void;
struct btf {
@@ -39,9 +44,8 @@ struct btf {
struct btf_ext_info {
/*
- * info points to a deep copy of the individual info section
- * (e.g. func_info and line_info) from the .BTF.ext.
- * It does not include the __u32 rec_size.
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
*/
void *info;
__u32 rec_size;
@@ -49,8 +53,13 @@ struct btf_ext_info {
};
struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
struct btf_ext_info func_info;
struct btf_ext_info line_info;
+ __u32 data_size;
};
struct btf_ext_info_sec {
@@ -107,54 +116,54 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
return 0;
}
-static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_hdr(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
__u32 meta_left;
if (btf->data_size < sizeof(struct btf_header)) {
- elog("BTF header not found\n");
+ pr_debug("BTF header not found\n");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
return -ENOTSUP;
}
meta_left = btf->data_size - sizeof(*hdr);
if (!meta_left) {
- elog("BTF has no data\n");
+ pr_debug("BTF has no data\n");
return -EINVAL;
}
if (meta_left < hdr->type_off) {
- elog("Invalid BTF type section offset:%u\n", hdr->type_off);
+ pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
return -EINVAL;
}
if (meta_left < hdr->str_off) {
- elog("Invalid BTF string section offset:%u\n", hdr->str_off);
+ pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
return -EINVAL;
}
if (hdr->type_off >= hdr->str_off) {
- elog("BTF type section offset >= string section offset. No type?\n");
+ pr_debug("BTF type section offset >= string section offset. No type?\n");
return -EINVAL;
}
if (hdr->type_off & 0x02) {
- elog("BTF type section is not aligned to 4 bytes\n");
+ pr_debug("BTF type section is not aligned to 4 bytes\n");
return -EINVAL;
}
@@ -163,15 +172,15 @@ static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_str_sec(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
const char *start = btf->nohdr_data + hdr->str_off;
const char *end = start + btf->hdr->str_len;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
start[0] || end[-1]) {
- elog("Invalid BTF string section\n");
+ pr_debug("Invalid BTF string section\n");
return -EINVAL;
}
@@ -180,7 +189,42 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
-static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_type_size(struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ case BTF_KIND_VAR:
+ return base_size + sizeof(struct btf_var);
+ case BTF_KIND_DATASEC:
+ return base_size + vlen * sizeof(struct btf_var_secinfo);
+ default:
+ pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
+ return -EINVAL;
+ }
+}
+
+static int btf_parse_type_sec(struct btf *btf)
{
struct btf_header *hdr = btf->hdr;
void *nohdr_data = btf->nohdr_data;
@@ -189,41 +233,13 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
while (next_type < end_type) {
struct btf_type *t = next_type;
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ int type_size;
int err;
- next_type += sizeof(*t);
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- next_type += sizeof(int);
- break;
- case BTF_KIND_ARRAY:
- next_type += sizeof(struct btf_array);
- break;
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- next_type += vlen * sizeof(struct btf_member);
- break;
- case BTF_KIND_ENUM:
- next_type += vlen * sizeof(struct btf_enum);
- break;
- case BTF_KIND_FUNC_PROTO:
- next_type += vlen * sizeof(struct btf_param);
- break;
- case BTF_KIND_FUNC:
- case BTF_KIND_TYPEDEF:
- case BTF_KIND_PTR:
- case BTF_KIND_FWD:
- case BTF_KIND_VOLATILE:
- case BTF_KIND_CONST:
- case BTF_KIND_RESTRICT:
- break;
- default:
- elog("Unsupported BTF_KIND:%u\n",
- BTF_INFO_KIND(t->info));
- return -EINVAL;
- }
-
+ type_size = btf_type_size(t);
+ if (type_size < 0)
+ return type_size;
+ next_type += type_size;
err = btf_add_type(btf, t);
if (err)
return err;
@@ -232,6 +248,11 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
return 0;
}
+__u32 btf__get_nr_types(const struct btf *btf)
+{
+ return btf->nr_types;
+}
+
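With the new accessor, walking all types becomes straightforward; a hedged sketch (type IDs start at 1, with 0 being the implicit void type):

#include <linux/btf.h>
#include <bpf/btf.h>

/* Count how many struct types a parsed BTF object carries. */
static int count_structs(const struct btf *btf)
{
	__u32 id, n = btf__get_nr_types(btf);
	int cnt = 0;

	for (id = 1; id <= n; id++) {
		const struct btf_type *t = btf__type_by_id(btf, id);

		if (BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT)
			cnt++;
	}
	return cnt;
}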
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id > btf->nr_types)
@@ -250,21 +271,6 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
return !t || btf_type_is_void(t);
}
-static __s64 btf_type_size(const struct btf_type *t)
-{
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_INT:
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- case BTF_KIND_ENUM:
- return t->size;
- case BTF_KIND_PTR:
- return sizeof(void *);
- default:
- return -EINVAL;
- }
-}
-
#define MAX_RESOLVE_DEPTH 32
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
@@ -278,15 +284,22 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
i++) {
- size = btf_type_size(t);
- if (size >= 0)
- break;
-
switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_DATASEC:
+ size = t->size;
+ goto done;
+ case BTF_KIND_PTR:
+ size = sizeof(void *);
+ goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_VAR:
type_id = t->type;
break;
case BTF_KIND_ARRAY:
@@ -306,6 +319,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
if (size < 0)
return -EINVAL;
+done:
if (nelems && size > UINT32_MAX / nelems)
return -E2BIG;
@@ -320,7 +334,8 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
while (depth < MAX_RESOLVE_DEPTH &&
!btf_type_is_void_or_null(t) &&
- IS_MODIFIER(BTF_INFO_KIND(t->info))) {
+ (IS_MODIFIER(BTF_INFO_KIND(t->info)) ||
+ IS_VAR(BTF_INFO_KIND(t->info)))) {
type_id = t->type;
t = btf__type_by_id(btf, type_id);
depth++;
@@ -363,10 +378,8 @@ void btf__free(struct btf *btf)
free(btf);
}
-struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf *btf__new(__u8 *data, __u32 size)
{
- __u32 log_buf_size = 0;
- char *log_buf = NULL;
struct btf *btf;
int err;
@@ -376,16 +389,6 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
btf->fd = -1;
- if (err_log) {
- log_buf = malloc(BPF_LOG_BUF_SIZE);
- if (!log_buf) {
- err = -ENOMEM;
- goto done;
- }
- *log_buf = 0;
- log_buf_size = BPF_LOG_BUF_SIZE;
- }
-
btf->data = malloc(size);
if (!btf->data) {
err = -ENOMEM;
@@ -395,30 +398,17 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
memcpy(btf->data, data, size);
btf->data_size = size;
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
-
- if (btf->fd == -1) {
- err = -errno;
- elog("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (log_buf && *log_buf)
- elog("%s\n", log_buf);
- goto done;
- }
-
- err = btf_parse_hdr(btf, err_log);
+ err = btf_parse_hdr(btf);
if (err)
goto done;
- err = btf_parse_str_sec(btf, err_log);
+ err = btf_parse_str_sec(btf);
if (err)
goto done;
- err = btf_parse_type_sec(btf, err_log);
+ err = btf_parse_type_sec(btf);
done:
- free(log_buf);
-
if (err) {
btf__free(btf);
return ERR_PTR(err);
@@ -427,11 +417,133 @@ done:
return btf;
}
+static int compare_vsi_off(const void *_a, const void *_b)
+{
+ const struct btf_var_secinfo *a = _a;
+ const struct btf_var_secinfo *b = _b;
+
+ return a->offset - b->offset;
+}
+
+static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
+ struct btf_type *t)
+{
+ __u32 size = 0, off = 0, i, vars = BTF_INFO_VLEN(t->info);
+ const char *name = btf__name_by_offset(btf, t->name_off);
+ const struct btf_type *t_var;
+ struct btf_var_secinfo *vsi;
+ struct btf_var *var;
+ int ret;
+
+ if (!name) {
+ pr_debug("No name found in string section for DATASEC kind.\n");
+ return -ENOENT;
+ }
+
+ ret = bpf_object__section_size(obj, name, &size);
+ if (ret || !size || (t->size && t->size != size)) {
+ pr_debug("Invalid size for section %s: %u bytes\n", name, size);
+ return -ENOENT;
+ }
+
+ t->size = size;
+
+ for (i = 0, vsi = (struct btf_var_secinfo *)(t + 1);
+ i < vars; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ var = (struct btf_var *)(t_var + 1);
+
+ if (BTF_INFO_KIND(t_var->info) != BTF_KIND_VAR) {
+ pr_debug("Non-VAR type seen in section %s\n", name);
+ return -EINVAL;
+ }
+
+ if (var->linkage == BTF_VAR_STATIC)
+ continue;
+
+ name = btf__name_by_offset(btf, t_var->name_off);
+ if (!name) {
+ pr_debug("No name found in string section for VAR kind\n");
+ return -ENOENT;
+ }
+
+ ret = bpf_object__variable_offset(obj, name, &off);
+ if (ret) {
+ pr_debug("No offset found in symbol table for VAR %s\n", name);
+ return -ENOENT;
+ }
+
+ vsi->offset = off;
+ }
+
+ qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
+ return 0;
+}
+
+int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
+{
+ int err = 0;
+ __u32 i;
+
+ for (i = 1; i <= btf->nr_types; i++) {
+ struct btf_type *t = btf->types[i];
+
+ /* Loader needs to fix up some of the things compiler
+ * couldn't get its hands on while emitting BTF. This
+ * is section size and global variable offset. We use
+ * the info from the ELF itself for this purpose.
+ */
+ if (BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC) {
+ err = btf_fixup_datasec(obj, btf, t);
+ if (err)
+ break;
+ }
+ }
+
+ return err;
+}
+
+int btf__load(struct btf *btf)
+{
+ __u32 log_buf_size = BPF_LOG_BUF_SIZE;
+ char *log_buf = NULL;
+ int err = 0;
+
+ if (btf->fd >= 0)
+ return -EEXIST;
+
+ log_buf = malloc(log_buf_size);
+ if (!log_buf)
+ return -ENOMEM;
+
+ *log_buf = 0;
+
+ btf->fd = bpf_load_btf(btf->data, btf->data_size,
+ log_buf, log_buf_size, false);
+ if (btf->fd < 0) {
+ err = -errno;
+ pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
+ if (*log_buf)
+ pr_warning("%s\n", log_buf);
+ goto done;
+ }
+
+done:
+ free(log_buf);
+ return err;
+}
+
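Loading is now a separate, explicit step; a hedged sketch of the new flow, with the raw .BTF bytes and their size taken as placeholders:

#include <bpf/btf.h>
#include <bpf/libbpf.h>	/* libbpf_get_error() */

static int parse_and_load_btf(__u8 *data, __u32 size)
{
	struct btf *btf;
	int err;

	/* btf__new() now only parses; nothing reaches the kernel yet */
	btf = btf__new(data, size);
	err = libbpf_get_error(btf);
	if (err)
		return err;

	/* btf__load() performs BPF_BTF_LOAD and stashes the returned fd */
	err = btf__load(btf);
	if (err) {
		btf__free(btf);
		return err;
	}

	return btf__fd(btf);
}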
int btf__fd(const struct btf *btf)
{
return btf->fd;
}
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+{
+ *size = btf->data_size;
+ return btf->data;
+}
+
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->hdr->str_len)
@@ -467,7 +579,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
@@ -481,7 +593,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
ptr = temp_ptr;
- bzero(ptr, last_size);
+ memset(ptr, 0, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
@@ -491,7 +603,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
goto exit_free;
}
- *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL);
+ *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
@@ -504,7 +616,79 @@ exit_free:
return err;
}
-struct btf_ext_sec_copy_param {
+int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size, __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id)
+{
+ const struct btf_type *container_type;
+ const struct btf_member *key, *value;
+ const size_t max_name = 256;
+ char container_name[max_name];
+ __s64 key_size, value_size;
+ __s32 container_id;
+
+ if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
+ max_name) {
+ pr_warning("map:%s length of '____btf_map_%s' is too long\n",
+ map_name, map_name);
+ return -EINVAL;
+ }
+
+ container_id = btf__find_by_name(btf, container_name);
+ if (container_id < 0) {
+ pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
+ map_name, container_name);
+ return container_id;
+ }
+
+ container_type = btf__type_by_id(btf, container_id);
+ if (!container_type) {
+ pr_warning("map:%s cannot find BTF type for container_id:%u\n",
+ map_name, container_id);
+ return -EINVAL;
+ }
+
+ if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
+ BTF_INFO_VLEN(container_type->info) < 2) {
+ pr_warning("map:%s container_name:%s is an invalid container struct\n",
+ map_name, container_name);
+ return -EINVAL;
+ }
+
+ key = (struct btf_member *)(container_type + 1);
+ value = key + 1;
+
+ key_size = btf__resolve_size(btf, key->type);
+ if (key_size < 0) {
+ pr_warning("map:%s invalid BTF key_type_size\n", map_name);
+ return key_size;
+ }
+
+ if (expected_key_size != key_size) {
+ pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
+ map_name, (__u32)key_size, expected_key_size);
+ return -EINVAL;
+ }
+
+ value_size = btf__resolve_size(btf, value->type);
+ if (value_size < 0) {
+ pr_warning("map:%s invalid BTF value_type_size\n", map_name);
+ return value_size;
+ }
+
+ if (expected_value_size != value_size) {
+ pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
+ map_name, (__u32)value_size, expected_value_size);
+ return -EINVAL;
+ }
+
+ *key_type_id = key->type;
+ *value_type_id = value->type;
+
+ return 0;
+}
+
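A hedged sketch of how a loader might use the new helper to resolve BTF type IDs for a map annotated with BPF_ANNOTATE_KV_PAIR; the map name and expected sizes are placeholders:

#include <bpf/btf.h>

static int resolve_map_btf(const struct btf *btf, __u32 key_size,
			   __u32 value_size, __u32 *key_tid, __u32 *value_tid)
{
	/* Looks up the ____btf_map_<name> container struct emitted by
	 * BPF_ANNOTATE_KV_PAIR and checks that the BTF sizes match the
	 * map definition before returning the key/value type IDs. */
	return btf__get_map_kv_tids(btf, "my_map", key_size, value_size,
				    key_tid, value_tid);
}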
+struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
__u32 min_rec_size;
@@ -512,41 +696,33 @@ struct btf_ext_sec_copy_param {
const char *desc;
};
-static int btf_ext_copy_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- struct btf_ext_sec_copy_param *ext_sec,
- btf_print_fn_t err_log)
+static int btf_ext_setup_info(struct btf_ext *btf_ext,
+ struct btf_ext_sec_setup_param *ext_sec)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
/* The start of the info sec (including the __u32 record_size). */
- const void *info;
-
- /* data and data_size do not include btf_ext_header from now on */
- data = data + hdr->hdr_len;
- data_size -= hdr->hdr_len;
+ void *info;
if (ext_sec->off & 0x03) {
- elog(".BTF.ext %s section is not aligned to 4 bytes\n",
+ pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
ext_sec->desc);
return -EINVAL;
}
- if (data_size < ext_sec->off ||
- ext_sec->len > data_size - ext_sec->off) {
- elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
- ext_sec->desc, ext_sec->off, ext_sec->len);
+ info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
+ info_left = ext_sec->len;
+
+ if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
+ pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
+ ext_sec->desc, ext_sec->off, ext_sec->len);
return -EINVAL;
}
- info = data + ext_sec->off;
- info_left = ext_sec->len;
-
/* At least a record size */
if (info_left < sizeof(__u32)) {
- elog(".BTF.ext %s record size not found\n", ext_sec->desc);
+ pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
return -EINVAL;
}
@@ -554,8 +730,8 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
record_size = *(__u32 *)info;
if (record_size < ext_sec->min_rec_size ||
record_size & 0x03) {
- elog("%s section in .BTF.ext has invalid record size %u\n",
- ext_sec->desc, record_size);
+ pr_debug("%s section in .BTF.ext has invalid record size %u\n",
+ ext_sec->desc, record_size);
return -EINVAL;
}
@@ -564,7 +740,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
/* If no records, return failure now so .BTF.ext won't be used. */
if (!info_left) {
- elog("%s section in .BTF.ext has no records", ext_sec->desc);
+ pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
return -EINVAL;
}
@@ -574,14 +750,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
__u32 num_records;
if (info_left < sec_hdrlen) {
- elog("%s section header is not found in .BTF.ext\n",
+ pr_debug("%s section header is not found in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
num_records = sinfo->num_info;
if (num_records == 0) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -589,7 +765,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
total_record_size = sec_hdrlen +
(__u64)num_records * record_size;
if (info_left < total_record_size) {
- elog("%s section has incorrect num_records in .BTF.ext\n",
+ pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
return -EINVAL;
}
@@ -601,74 +777,64 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
- ext_info->info = malloc(ext_info->len);
- if (!ext_info->info)
- return -ENOMEM;
- memcpy(ext_info->info, info + sizeof(__u32), ext_info->len);
+ ext_info->info = info + sizeof(__u32);
return 0;
}
-static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->func_info_off,
- .len = hdr->func_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->func_info_off,
+ .len = btf_ext->hdr->func_info_len,
.min_rec_size = sizeof(struct bpf_func_info_min),
.ext_info = &btf_ext->func_info,
.desc = "func_info"
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
- __u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
{
- const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- struct btf_ext_sec_copy_param param = {
- .off = hdr->line_info_off,
- .len = hdr->line_info_len,
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->line_info_off,
+ .len = btf_ext->hdr->line_info_len,
.min_rec_size = sizeof(struct bpf_line_info_min),
.ext_info = &btf_ext->line_info,
.desc = "line_info",
};
- return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+ return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_parse_hdr(__u8 *data, __u32 data_size,
- btf_print_fn_t err_log)
+static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
data_size < hdr->hdr_len) {
- elog("BTF.ext header not found");
+ pr_debug("BTF.ext header not found");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
- elog("Invalid BTF.ext magic:%x\n", hdr->magic);
+ pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
- elog("Unsupported BTF.ext version:%u\n", hdr->version);
+ pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
- elog("Unsupported BTF.ext flags:%x\n", hdr->flags);
+ pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
return -ENOTSUP;
}
if (data_size == hdr->hdr_len) {
- elog("BTF.ext has no data\n");
+ pr_debug("BTF.ext has no data\n");
return -EINVAL;
}
@@ -679,18 +845,16 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (!btf_ext)
return;
-
- free(btf_ext->func_info.info);
- free(btf_ext->line_info.info);
+ free(btf_ext->data);
free(btf_ext);
}
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
{
struct btf_ext *btf_ext;
int err;
- err = btf_ext_parse_hdr(data, size, err_log);
+ err = btf_ext_parse_hdr(data, size);
if (err)
return ERR_PTR(err);
@@ -698,13 +862,23 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
if (!btf_ext)
return ERR_PTR(-ENOMEM);
- err = btf_ext_copy_func_info(btf_ext, data, size, err_log);
- if (err) {
- btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ btf_ext->data_size = size;
+ btf_ext->data = malloc(size);
+ if (!btf_ext->data) {
+ err = -ENOMEM;
+ goto done;
}
+ memcpy(btf_ext->data, data, size);
- err = btf_ext_copy_line_info(btf_ext, data, size, err_log);
+ err = btf_ext_setup_func_info(btf_ext);
+ if (err)
+ goto done;
+
+ err = btf_ext_setup_line_info(btf_ext);
+ if (err)
+ goto done;
+
+done:
if (err) {
btf_ext__free(btf_ext);
return ERR_PTR(err);
@@ -713,6 +887,12 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
return btf_ext;
}
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+{
+ *size = btf_ext->data_size;
+ return btf_ext->data;
+}
+
static int btf_ext_reloc_info(const struct btf *btf,
const struct btf_ext_info *ext_info,
const char *sec_name, __u32 insns_cnt,
@@ -761,7 +941,8 @@ static int btf_ext_reloc_info(const struct btf *btf,
return -ENOENT;
}
-int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *cnt)
{
@@ -769,7 +950,8 @@ int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ex
insns_cnt, func_info, cnt);
}
-int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext,
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **line_info, __u32 *cnt)
{
@@ -786,3 +968,1831 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
{
return btf_ext->line_info.rec_size;
}
+
+struct btf_dedup;
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
+static void btf_dedup_free(struct btf_dedup *d);
+static int btf_dedup_strings(struct btf_dedup *d);
+static int btf_dedup_prim_types(struct btf_dedup *d);
+static int btf_dedup_struct_types(struct btf_dedup *d);
+static int btf_dedup_ref_types(struct btf_dedup *d);
+static int btf_dedup_compact_types(struct btf_dedup *d);
+static int btf_dedup_remap_types(struct btf_dedup *d);
+
+/*
+ * Deduplicate BTF types and strings.
+ *
+ * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
+ * section with all BTF type descriptors and string data. It overwrites that
+ * memory in-place with deduplicated types and strings without any loss of
+ * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
+ * is provided, all the strings referenced from .BTF.ext section are honored
+ * and updated to point to the right offsets after deduplication.
+ *
+ * If function returns with error, type/string data might be garbled and should
+ * be discarded.
+ *
+ * A more verbose and detailed description of both the problem btf_dedup is
+ * solving and the solution can be found at:
+ * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
+ *
+ * Problem description and justification
+ * =====================================
+ *
+ * BTF type information is typically emitted either as a result of conversion
+ * from DWARF to BTF or directly by compiler. In both cases, each compilation
+ * unit contains information about a subset of all the types that are used
+ * in an application. These subsets are frequently overlapping and contain a lot
+ * of duplicated information when later concatenated together into a single
+ * binary. This algorithm ensures that each unique type is represented by a single
+ * BTF type descriptor, greatly reducing resulting size of BTF data.
+ *
+ * Compilation unit isolation and subsequent duplication of data is not the only
+ * problem. The same type hierarchy (e.g., struct and all the types that struct
+ * references) in different compilation units can be represented in BTF to
+ * various degrees of completeness (or, rather, incompleteness) due to
+ * struct/union forward declarations.
+ *
+ * Let's take a look at an example, that we'll use to better understand the
+ * problem (and solution). Suppose we have two compilation units, each using
+ * same `struct S`, but each of them having incomplete type information about
+ * struct's fields:
+ *
+ * // CU #1:
+ * struct S;
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B;
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * // CU #2:
+ * struct S;
+ * struct A;
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * In case of CU #1, BTF data will know only that `struct B` exists (but no
+ * more), but will know the complete type information about `struct A`. While
+ * for CU #2, it will know full type information about `struct B`, but will
+ * only know about forward declaration of `struct A` (in BTF terms, it will
+ * have `BTF_KIND_FWD` type descriptor with name `A`).
+ *
+ * This compilation unit isolation means that it's possible that there is no
+ * single CU with complete type information describing structs `S`, `A`, and
+ * `B`. Also, we might get tons of duplicated and redundant type information.
+ *
+ * Additional complication we need to keep in mind comes from the fact that
+ * types, in general, can form graphs containing cycles, not just DAGs.
+ *
+ * While algorithm does deduplication, it also merges and resolves type
+ * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
+ * E.g., in the example above with two compilation units having partial type
+ * information for structs `A` and `B`, the output of algorithm will emit
+ * a single copy of each BTF type that describes structs `A`, `B`, and `S`
+ * (as well as type information for `int` and pointers), as if they were defined
+ * in a single compilation unit as:
+ *
+ * struct A {
+ * int a;
+ * struct A* self;
+ * struct S* parent;
+ * };
+ * struct B {
+ * int b;
+ * struct B* self;
+ * struct S* parent;
+ * };
+ * struct S {
+ * struct A* a_ptr;
+ * struct B* b_ptr;
+ * };
+ *
+ * Algorithm summary
+ * =================
+ *
+ * Algorithm completes its work in 6 separate passes:
+ *
+ * 1. Strings deduplication.
+ * 2. Primitive types deduplication (int, enum, fwd).
+ * 3. Struct/union types deduplication.
+ * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
+ * protos, and const/volatile/restrict modifiers).
+ * 5. Types compaction.
+ * 6. Types remapping.
+ *
+ * Algorithm determines canonical type descriptor, which is a single
+ * representative type for each truly unique type. This canonical type is the
+ * one that will go into final deduplicated BTF type information. For
+ * struct/unions, it is also the type that algorithm will merge additional type
+ * information into (while resolving FWDs), as it discovers it from data in
+ * other CUs. Each input BTF type eventually gets either mapped to itself, if
+ * that type is canonical, or to some other type, if that type is equivalent
+ * and was chosen as canonical representative. This mapping is stored in
+ * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
+ * FWD type got resolved to.
+ *
+ * To facilitate fast discovery of canonical types, we also maintain canonical
+ * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
+ * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
+ * that match that signature. With sufficiently good choice of type signature
+ * hashing function, we can limit number of canonical types for each unique type
+ * signature to a very small number, allowing to find canonical type for any
+ * duplicated type very quickly.
+ *
+ * Struct/union deduplication is the most critical part and algorithm for
+ * deduplicating structs/unions is described in greater details in comments for
+ * `btf_dedup_is_equiv` function.
+ */
+int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+ int err;
+
+ if (IS_ERR(d)) {
+ pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
+ return -EINVAL;
+ }
+
+ err = btf_dedup_strings(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_strings failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_prim_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_prim_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_struct_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_struct_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_ref_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_ref_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_compact_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_compact_types failed:%d\n", err);
+ goto done;
+ }
+ err = btf_dedup_remap_types(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_remap_types failed:%d\n", err);
+ goto done;
+ }
+
+done:
+ btf_dedup_free(d);
+ return err;
+}
+
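A hedged sketch of invoking deduplication after concatenating per-CU BTF, using either a NULL opts pointer or an explicit options struct as below:

#include <bpf/btf.h>

static int dedup_btf(struct btf *btf, struct btf_ext *btf_ext)
{
	struct btf_dedup_opts opts = {
		.dedup_table_size = 0,		/* 0 selects the default size */
		.dont_resolve_fwds = false,	/* allow FWD -> STRUCT/UNION merging */
	};

	/* Rewrites btf (and string offsets referenced from btf_ext) in place;
	 * on error the data may be garbled and should be discarded. */
	return btf__dedup(btf, btf_ext, &opts);
}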
+#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
+#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
+#define BTF_UNPROCESSED_ID ((__u32)-1)
+#define BTF_IN_PROGRESS_ID ((__u32)-2)
+
+struct btf_dedup_node {
+ struct btf_dedup_node *next;
+ __u32 type_id;
+};
+
+struct btf_dedup {
+ /* .BTF section to be deduped in-place */
+ struct btf *btf;
+ /*
+ * Optional .BTF.ext section. When provided, any strings referenced
+ * from it will be taken into account when deduping strings
+ */
+ struct btf_ext *btf_ext;
+ /*
+ * This is a map from any type's signature hash to a list of possible
+ * canonical representative type candidates. Hash collisions are
+ * ignored, so even types of various kinds can share same list of
+ * candidates, which is fine because we rely on subsequent
+ * btf_xxx_equal() checks to authoritatively verify type equality.
+ */
+ struct btf_dedup_node **dedup_table;
+ /* Canonical types map */
+ __u32 *map;
+ /* Hypothetical mapping, used during type graph equivalence checks */
+ __u32 *hypot_map;
+ __u32 *hypot_list;
+ size_t hypot_cnt;
+ size_t hypot_cap;
+ /* Various option modifying behavior of algorithm */
+ struct btf_dedup_opts opts;
+};
+
+struct btf_str_ptr {
+ const char *str;
+ __u32 new_off;
+ bool used;
+};
+
+struct btf_str_ptrs {
+ struct btf_str_ptr *ptrs;
+ const char *data;
+ __u32 cnt;
+ __u32 cap;
+};
+
+static inline __u32 hash_combine(__u32 h, __u32 value)
+{
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME 0x9e370001UL
+ return h * 37 + value * GOLDEN_RATIO_PRIME;
+#undef GOLDEN_RATIO_PRIME
+}
+
+#define for_each_dedup_cand(d, hash, node) \
+ for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
+ node; \
+ node = node->next)
+
+static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+{
+ struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
+ int bucket = hash & (d->opts.dedup_table_size - 1);
+
+ if (!node)
+ return -ENOMEM;
+ node->type_id = type_id;
+ node->next = d->dedup_table[bucket];
+ d->dedup_table[bucket] = node;
+ return 0;
+}
+
+static int btf_dedup_hypot_map_add(struct btf_dedup *d,
+ __u32 from_id, __u32 to_id)
+{
+ if (d->hypot_cnt == d->hypot_cap) {
+ __u32 *new_list;
+
+ d->hypot_cap += max(16, d->hypot_cap / 2);
+ new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
+ if (!new_list)
+ return -ENOMEM;
+ d->hypot_list = new_list;
+ }
+ d->hypot_list[d->hypot_cnt++] = from_id;
+ d->hypot_map[from_id] = to_id;
+ return 0;
+}
+
+static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
+{
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++)
+ d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
+ d->hypot_cnt = 0;
+}
+
+static void btf_dedup_table_free(struct btf_dedup *d)
+{
+ struct btf_dedup_node *head, *tmp;
+ int i;
+
+ if (!d->dedup_table)
+ return;
+
+	for (i = 0; i < d->opts.dedup_table_size; i++) {
+		head = d->dedup_table[i];
+		while (head) {
+			tmp = head;
+			head = head->next;
+			free(tmp);
+		}
+	}
+
+ free(d->dedup_table);
+ d->dedup_table = NULL;
+}
+
+static void btf_dedup_free(struct btf_dedup *d)
+{
+ btf_dedup_table_free(d);
+
+ free(d->map);
+ d->map = NULL;
+
+ free(d->hypot_map);
+ d->hypot_map = NULL;
+
+ free(d->hypot_list);
+ d->hypot_list = NULL;
+
+ free(d);
+}
+
+/* Find closest power of two >= to size, capped at 2^max_size_log */
+static __u32 roundup_pow2_max(__u32 size, int max_size_log)
+{
+ int i;
+
+ for (i = 0; i < max_size_log && (1U << i) < size; i++)
+ ;
+ return 1U << i;
+}
+
+
+static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts)
+{
+ struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+ int i, err = 0;
+ __u32 sz;
+
+ if (!d)
+ return ERR_PTR(-ENOMEM);
+
+ d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
+ sz = opts && opts->dedup_table_size ? opts->dedup_table_size
+ : BTF_DEDUP_TABLE_DEFAULT_SIZE;
+ sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
+ d->opts.dedup_table_size = sz;
+
+ d->btf = btf;
+ d->btf_ext = btf_ext;
+
+ d->dedup_table = calloc(d->opts.dedup_table_size,
+ sizeof(struct btf_dedup_node *));
+ if (!d->dedup_table) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ /* special BTF "void" type is made canonical immediately */
+ d->map[0] = 0;
+ for (i = 1; i <= btf->nr_types; i++) {
+ struct btf_type *t = d->btf->types[i];
+ __u16 kind = BTF_INFO_KIND(t->info);
+
+ /* VAR and DATASEC are never deduped and are self-canonical */
+ if (kind == BTF_KIND_VAR || kind == BTF_KIND_DATASEC)
+ d->map[i] = i;
+ else
+ d->map[i] = BTF_UNPROCESSED_ID;
+ }
+
+ d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ if (!d->hypot_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+ for (i = 0; i <= btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+done:
+ if (err) {
+ btf_dedup_free(d);
+ return ERR_PTR(err);
+ }
+
+ return d;
+}
+
+typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
+
+/*
+ * Iterate over all possible places in .BTF and .BTF.ext that can reference
+ * string and pass pointer to it to a provided callback `fn`.
+ */
+static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
+{
+ void *line_data_cur, *line_data_end;
+ int i, j, r, rec_size;
+ struct btf_type *t;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ t = d->btf->types[i];
+ r = fn(&t->name_off, ctx);
+ if (r)
+ return r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ struct btf_enum *m = (struct btf_enum *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (j = 0; j < vlen; j++) {
+ r = fn(&m->name_off, ctx);
+ if (r)
+ return r;
+ m++;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (!d->btf_ext)
+ return 0;
+
+ line_data_cur = d->btf_ext->line_info.info;
+ line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
+ rec_size = d->btf_ext->line_info.rec_size;
+
+ while (line_data_cur < line_data_end) {
+ struct btf_ext_info_sec *sec = line_data_cur;
+ struct bpf_line_info_min *line_info;
+ __u32 num_info = sec->num_info;
+
+ r = fn(&sec->sec_name_off, ctx);
+ if (r)
+ return r;
+
+ line_data_cur += sizeof(struct btf_ext_info_sec);
+ for (i = 0; i < num_info; i++) {
+ line_info = line_data_cur;
+ r = fn(&line_info->file_name_off, ctx);
+ if (r)
+ return r;
+ r = fn(&line_info->line_off, ctx);
+ if (r)
+ return r;
+ line_data_cur += rec_size;
+ }
+ }
+
+ return 0;
+}
+
+static int str_sort_by_content(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ return strcmp(p1->str, p2->str);
+}
+
+static int str_sort_by_offset(const void *a1, const void *a2)
+{
+ const struct btf_str_ptr *p1 = a1;
+ const struct btf_str_ptr *p2 = a2;
+
+ if (p1->str != p2->str)
+ return p1->str < p2->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
+{
+ const struct btf_str_ptr *p = pelem;
+
+ if (str_ptr != p->str)
+ return (const char *)str_ptr < p->str ? -1 : 1;
+ return 0;
+}
+
+static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ s->used = true;
+ return 0;
+}
+
+static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
+{
+ struct btf_str_ptrs *strs;
+ struct btf_str_ptr *s;
+
+ if (*str_off_ptr == 0)
+ return 0;
+
+ strs = ctx;
+ s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
+ sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
+ if (!s)
+ return -EINVAL;
+ *str_off_ptr = s->new_off;
+ return 0;
+}
+
+/*
+ * Dedup string and filter out those that are not referenced from either .BTF
+ * or .BTF.ext (if provided) sections.
+ *
+ * This is done by building index of all strings in BTF's string section,
+ * then iterating over all entities that can reference strings (e.g., type
+ * names, struct field names, .BTF.ext line info, etc) and marking corresponding
+ * strings as used. After that all used strings are deduped and compacted into
+ * sequential blob of memory and new offsets are calculated. Then all the string
+ * references are iterated again and rewritten using new offsets.
+ */
+static int btf_dedup_strings(struct btf_dedup *d)
+{
+ const struct btf_header *hdr = d->btf->hdr;
+ char *start = (char *)d->btf->nohdr_data + hdr->str_off;
+ char *end = start + d->btf->hdr->str_len;
+ char *p = start, *tmp_strs = NULL;
+ struct btf_str_ptrs strs = {
+ .cnt = 0,
+ .cap = 0,
+ .ptrs = NULL,
+ .data = start,
+ };
+ int i, j, err = 0, grp_idx;
+ bool grp_used;
+
+ /* build index of all strings */
+ while (p < end) {
+ if (strs.cnt + 1 > strs.cap) {
+ struct btf_str_ptr *new_ptrs;
+
+ strs.cap += max(strs.cnt / 2, 16);
+ new_ptrs = realloc(strs.ptrs,
+ sizeof(strs.ptrs[0]) * strs.cap);
+ if (!new_ptrs) {
+ err = -ENOMEM;
+ goto done;
+ }
+ strs.ptrs = new_ptrs;
+ }
+
+ strs.ptrs[strs.cnt].str = p;
+ strs.ptrs[strs.cnt].used = false;
+
+ p += strlen(p) + 1;
+ strs.cnt++;
+ }
+
+ /* temporary storage for deduplicated strings */
+ tmp_strs = malloc(d->btf->hdr->str_len);
+ if (!tmp_strs) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* mark all used strings */
+ strs.ptrs[0].used = true;
+ err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
+ if (err)
+ goto done;
+
+	/* sort strings by content, so that we can identify duplicates */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
+
+ /*
+ * iterate groups of equal strings and if any instance in a group was
+ * referenced, emit single instance and remember new offset
+ */
+ p = tmp_strs;
+ grp_idx = 0;
+ grp_used = strs.ptrs[0].used;
+ /* iterate past end to avoid code duplication after loop */
+ for (i = 1; i <= strs.cnt; i++) {
+ /*
+ * when i == strs.cnt, we want to skip string comparison and go
+ * straight to handling last group of strings (otherwise we'd
+ * need to handle last group after the loop w/ duplicated code)
+ */
+ if (i < strs.cnt &&
+ !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
+ grp_used = grp_used || strs.ptrs[i].used;
+ continue;
+ }
+
+ /*
+ * this check would have been required after the loop to handle
+ * last group of strings, but due to <= condition in a loop
+ * we avoid that duplication
+ */
+ if (grp_used) {
+ int new_off = p - tmp_strs;
+ __u32 len = strlen(strs.ptrs[grp_idx].str);
+
+ memmove(p, strs.ptrs[grp_idx].str, len + 1);
+ for (j = grp_idx; j < i; j++)
+ strs.ptrs[j].new_off = new_off;
+ p += len + 1;
+ }
+
+ if (i < strs.cnt) {
+ grp_idx = i;
+ grp_used = strs.ptrs[i].used;
+ }
+ }
+
+ /* replace original strings with deduped ones */
+ d->btf->hdr->str_len = p - tmp_strs;
+ memmove(start, tmp_strs, d->btf->hdr->str_len);
+ end = start + d->btf->hdr->str_len;
+
+ /* restore original order for further binary search lookups */
+ qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
+
+ /* remap string offsets */
+ err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
+ if (err)
+ goto done;
+
+ d->btf->hdr->str_len = end - start;
+
+done:
+ free(tmp_strs);
+ free(strs.ptrs);
+ return err;
+}
+
+static __u32 btf_hash_common(struct btf_type *t)
+{
+ __u32 h;
+
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info);
+ h = hash_combine(h, t->size);
+ return h;
+}
+
+static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
+{
+ return t1->name_off == t2->name_off &&
+ t1->info == t2->info &&
+ t1->size == t2->size;
+}
+
+/* Calculate type signature hash of INT. */
+static __u32 btf_hash_int(struct btf_type *t)
+{
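+ /* INT's trailing __u32 encodes bit size, offset and encoding flags */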
+ __u32 info = *(__u32 *)(t + 1);
+ __u32 h;
+
+ h = btf_hash_common(t);
+ h = hash_combine(h, info);
+ return h;
+}
+
+/* Check structural equality of two INTs. */
+static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
+{
+ __u32 info1, info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+ info1 = *(__u32 *)(t1 + 1);
+ info2 = *(__u32 *)(t2 + 1);
+ return info1 == info2;
+}
+
+/* Calculate type signature hash of ENUM. */
+static __u32 btf_hash_enum(struct btf_type *t)
+{
+ __u32 h;
+
+ /* don't hash vlen and enum members to support enum fwd resolving */
+ h = hash_combine(0, t->name_off);
+ h = hash_combine(h, t->info & ~0xffff);
+ h = hash_combine(h, t->size);
+ return h;
+}
+
+/* Check structural equality of two ENUMs. */
+static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_enum *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_enum *)(t1 + 1);
+ m2 = (struct btf_enum *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->val != m2->val)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+static inline bool btf_is_enum_fwd(struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
+ BTF_INFO_VLEN(t->info) == 0;
+}
+
+static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+ return btf_equal_enum(t1, t2);
+ /* ignore vlen when comparing */
+ return t1->name_off == t2->name_off &&
+ (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+ t1->size == t2->size;
+}
+
+/*
+ * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
+ * as referenced type ID equivalence is established separately during the type
+ * graph equivalence check algorithm.
+ */
+static __u32 btf_hash_struct(struct btf_type *t)
+{
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u32 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->offset);
+ /* no hashing of referenced type ID, it can be unresolved yet */
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_member *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_member *)(t1 + 1);
+ m2 = (struct btf_member *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->offset != m2->offset)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Calculate type signature hash of ARRAY, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static __u32 btf_hash_array(struct btf_type *t)
+{
+ struct btf_array *info = (struct btf_array *)(t + 1);
+ __u32 h = btf_hash_common(t);
+
+ h = hash_combine(h, info->type);
+ h = hash_combine(h, info->index_type);
+ h = hash_combine(h, info->nelems);
+ return h;
+}
+
+/*
+ * Check exact equality of two ARRAYs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * ARRAY to potential canonical representative.
+ */
+static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->type == info2->type &&
+ info1->index_type == info2->index_type &&
+ info1->nelems == info2->nelems;
+}
+
+/*
+ * Check structural compatibility of two ARRAYs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_array *info1, *info2;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ info1 = (struct btf_array *)(t1 + 1);
+ info2 = (struct btf_array *)(t2 + 1);
+ return info1->nelems == info2->nelems;
+}
+
+/*
+ * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
+ * under assumption that they were already resolved to canonical type IDs and
+ * are not going to change.
+ */
+static inline __u32 btf_hash_fnproto(struct btf_type *t)
+{
+ struct btf_param *member = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u32 h = btf_hash_common(t);
+ int i;
+
+ for (i = 0; i < vlen; i++) {
+ h = hash_combine(h, member->name_off);
+ h = hash_combine(h, member->type);
+ member++;
+ }
+ return h;
+}
+
+/*
+ * Check exact equality of two FUNC_PROTOs, taking into account referenced
+ * type IDs, under assumption that they were already resolved to canonical
+ * type IDs and are not going to change.
+ * This function is called during reference types deduplication to compare
+ * FUNC_PROTO to potential canonical representative.
+ */
+static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->type != m2->type)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
+ * IDs. This check is performed during type graph equivalence check and
+ * referenced types equivalence is checked separately.
+ */
+static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+{
+ struct btf_param *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ /* skip return type ID */
+ if (t1->name_off != t2->name_off || t1->info != t2->info)
+ return false;
+
+ vlen = BTF_INFO_VLEN(t1->info);
+ m1 = (struct btf_param *)(t1 + 1);
+ m2 = (struct btf_param *)(t2 + 1);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/*
+ * Deduplicate primitive types that can't reference other types, by calculating
+ * their type signature hash and comparing it with any possible canonical
+ * candidate. If no canonical candidate matches, the type itself is marked as
+ * canonical and is added into `btf_dedup->dedup_table` as another candidate.
+ */
+static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *cand;
+ struct btf_dedup_node *cand_node;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u32 h;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_ARRAY:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_VAR:
+ case BTF_KIND_DATASEC:
+ return 0;
+
+ case BTF_KIND_INT:
+ h = btf_hash_int(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_int(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ENUM:
+ h = btf_hash_enum(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_enum(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ if (d->opts.dont_resolve_fwds)
+ continue;
+ if (btf_compat_enum(t, cand)) {
+ if (btf_is_enum_fwd(t)) {
+ /* resolve fwd to full enum */
+ new_id = cand_node->type_id;
+ break;
+ }
+ /* resolve canonical enum fwd to full enum */
+ d->map[cand_node->type_id] = type_id;
+ }
+ }
+ break;
+
+ case BTF_KIND_FWD:
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
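+ /* no match found: register this type as a canonical candidate for hash h */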
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_prim_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_prim_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Check whether the type is already mapped into a canonical one (possibly itself).
+ */
+static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
+{
+ return d->map[type_id] <= BTF_MAX_NR_TYPES;
+}
+
+/*
+ * Resolve type ID into its canonical type ID, if any; otherwise return original
+ * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
+ * STRUCT/UNION link and resolve it into canonical type ID as well.
+ */
+static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+ return type_id;
+}
+
+/*
+ * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
+ * type ID.
+ */
+static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
+{
+ __u32 orig_type_id = type_id;
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
+ type_id = d->map[type_id];
+
+ if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ return type_id;
+
+ return orig_type_id;
+}
+
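+/* FWD's kind_flag distinguishes forward-declared unions (1) from structs (0) */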
+static inline __u16 btf_fwd_kind(struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
+}
+
+/*
+ * Check equivalence of BTF type graph formed by candidate struct/union (we'll
+ * call it "candidate graph" in this description for brevity) to a type graph
+ * formed by (potential) canonical struct/union ("canonical graph" for brevity
+ * here, though keep in mind that not all types in canonical graph are
+ * necessarily canonical representatives themselves, some of them might be
+ * duplicates or their uniqueness might not have been established yet).
+ * Returns:
+ * - >0, if type graphs are equivalent;
+ * - 0, if not equivalent;
+ * - <0, on error.
+ *
+ * Algorithm performs side-by-side DFS traversal of both type graphs and checks
+ * equivalence of BTF types at each step. If at any point BTF types in candidate
+ * and canonical graphs are not compatible structurally, whole graphs are
+ * incompatible. If types are structurally equivalent (i.e., all information
+ * except referenced type IDs is exactly the same), a mapping from `canon_id` to
+ * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
+ * If a type references other types, then those referenced types are checked
+ * for equivalence recursively.
+ *
+ * During DFS traversal, if we find that for current `canon_id` type we
+ * already have some mapping in hypothetical map, we check for two possible
+ * situations:
+ * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
+ * happen when type graphs have cycles. In this case we assume those two
+ * types are equivalent.
+ * - `canon_id` is mapped to a different type. This is a contradiction in our
+ * hypothetical mapping, because the same type in the canonical graph would
+ * correspond to two different types in the candidate graph, which for
+ * equivalent type graphs shouldn't happen. This condition terminates the
+ * equivalence check with a negative result.
+ *
+ * If the type graph traversal exhausts all types to check and finds no
+ * contradiction, then the type graphs are equivalent.
+ *
+ * When checking types for equivalence, there is one special case: FWD types.
+ * If FWD type resolution is allowed and one of the types (either from canonical
+ * or candidate graph) is FWD and the other is STRUCT/UNION (depending on the
+ * FWD's kind flag) and their names match, the hypothetical mapping is updated
+ * to point from FWD to STRUCT/UNION. If the graphs are successfully determined
+ * to be equivalent, this mapping will be used to record the FWD -> STRUCT/UNION
+ * mapping permanently.
+ *
+ * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
+ * if there are two identically named (or anonymous) structs/unions that are
+ * structurally compatible, one of which has a FWD field, while the other is a
+ * concrete STRUCT/UNION, but according to the C sources they are different
+ * structs/unions that reference different types with the same name. This is
+ * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
+ * resolution if this logic is causing problems.
+ *
+ * Doing FWD resolution means that both candidate and/or canonical graphs can
+ * consist of portions of the graph that come from multiple compilation units.
+ * This is because types within a single compilation unit are always
+ * deduplicated and FWDs are already resolved, if the referenced struct/union
+ * definition is available. So, if we had an unresolved FWD and found a
+ * corresponding STRUCT/UNION, they will be from different compilation units.
+ * This consequently means that when we "link" a FWD to the corresponding
+ * STRUCT/UNION, the type graph will likely have at least two different BTF
+ * types that describe the same type (e.g., most probably there will be two
+ * different BTF types for the same 'int' primitive type) and could even have
+ * "overlapping" parts of the type graph that describe the same subset of types.
+ *
+ * This in turn means that our assumption that each type in canonical graph
+ * must correspond to exactly one type in candidate graph might not hold
+ * anymore and will make it harder to detect contradictions using hypothetical
+ * map. To handle this problem, we follow FWD -> STRUCT/UNION
+ * resolution only in the canonical graph. FWDs in candidate graphs are never
+ * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
+ * that can occur:
+ * - Both types in canonical and candidate graphs are FWDs. If they are
+ * structurally equivalent, then they can either be both resolved to the
+ * same STRUCT/UNION or not resolved at all. In both cases they are
+ * equivalent and there is no need to resolve FWD on candidate side.
+ * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
+ * so there is nothing to resolve either; the algorithm checks equivalence anyway.
+ * - Type in canonical graph is FWD, while type in candidate is concrete
+ * STRUCT/UNION. In this case candidate graph comes from single compilation
+ * unit, so there is exactly one BTF type for each unique C type. After
+ * resolving FWD into STRUCT/UNION, there might be more than one BTF type
+ * in canonical graph mapping to single BTF type in candidate graph, but
+ * because hypothetical mapping maps from canonical to candidate types, it's
+ * alright, and we still maintain the property of having single `canon_id`
+ * mapping to single `cand_id` (there could be two different `canon_id`
+ * mapped to the same `cand_id`, but it's not contradictory).
+ * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
+ * graph is FWD. In this case we are just going to check compatibility of
+ * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
+ * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
+ * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
+ * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
+ * canonical graph.
+ */
+static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
+ __u32 canon_id)
+{
+ struct btf_type *cand_type;
+ struct btf_type *canon_type;
+ __u32 hypot_type_id;
+ __u16 cand_kind;
+ __u16 canon_kind;
+ int i, eq;
+
+ /* if both resolve to the same canonical, they must be equivalent */
+ if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
+ return 1;
+
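+ /* FWDs are resolved only on the canonical side (see comment above) */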
+ canon_id = resolve_fwd_id(d, canon_id);
+
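+ /* if canon_id is already hypothetically mapped, it must map to exactly cand_id */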
+ hypot_type_id = d->hypot_map[canon_id];
+ if (hypot_type_id <= BTF_MAX_NR_TYPES)
+ return hypot_type_id == cand_id;
+
+ if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
+ return -ENOMEM;
+
+ cand_type = d->btf->types[cand_id];
+ canon_type = d->btf->types[canon_id];
+ cand_kind = BTF_INFO_KIND(cand_type->info);
+ canon_kind = BTF_INFO_KIND(canon_type->info);
+
+ if (cand_type->name_off != canon_type->name_off)
+ return 0;
+
+ /* FWD <--> STRUCT/UNION equivalence check, if enabled */
+ if (!d->opts.dont_resolve_fwds
+ && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+ && cand_kind != canon_kind) {
+ __u16 real_kind;
+ __u16 fwd_kind;
+
+ if (cand_kind == BTF_KIND_FWD) {
+ real_kind = canon_kind;
+ fwd_kind = btf_fwd_kind(cand_type);
+ } else {
+ real_kind = cand_kind;
+ fwd_kind = btf_fwd_kind(canon_type);
+ }
+ return fwd_kind == real_kind;
+ }
+
+ if (cand_kind != canon_kind)
+ return 0;
+
+ switch (cand_kind) {
+ case BTF_KIND_INT:
+ return btf_equal_int(cand_type, canon_type);
+
+ case BTF_KIND_ENUM:
+ if (d->opts.dont_resolve_fwds)
+ return btf_equal_enum(cand_type, canon_type);
+ else
+ return btf_compat_enum(cand_type, canon_type);
+
+ case BTF_KIND_FWD:
+ return btf_equal_common(cand_type, canon_type);
+
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ if (cand_type->info != canon_type->info)
+ return 0;
+ return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *cand_arr, *canon_arr;
+
+ if (!btf_compat_array(cand_type, canon_type))
+ return 0;
+ cand_arr = (struct btf_array *)(cand_type + 1);
+ canon_arr = (struct btf_array *)(canon_type + 1);
+ eq = btf_dedup_is_equiv(d,
+ cand_arr->index_type, canon_arr->index_type);
+ if (eq <= 0)
+ return eq;
+ return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *cand_m, *canon_m;
+ __u16 vlen;
+
+ if (!btf_shallow_equal_struct(cand_type, canon_type))
+ return 0;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_m = (struct btf_member *)(cand_type + 1);
+ canon_m = (struct btf_member *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
+ if (eq <= 0)
+ return eq;
+ cand_m++;
+ canon_m++;
+ }
+
+ return 1;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *cand_p, *canon_p;
+ __u16 vlen;
+
+ if (!btf_compat_fnproto(cand_type, canon_type))
+ return 0;
+ eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
+ if (eq <= 0)
+ return eq;
+ vlen = BTF_INFO_VLEN(cand_type->info);
+ cand_p = (struct btf_param *)(cand_type + 1);
+ canon_p = (struct btf_param *)(canon_type + 1);
+ for (i = 0; i < vlen; i++) {
+ eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
+ if (eq <= 0)
+ return eq;
+ cand_p++;
+ canon_p++;
+ }
+ return 1;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Use hypothetical mapping, produced by successful type graph equivalence
+ * check, to augment existing struct/union canonical mapping, where possible.
+ *
+ * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
+ * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
+ * it doesn't matter if the FWD type was part of the canonical graph or the
+ * candidate one, we record the mapping anyway. As opposed to the carefulness
+ * required for struct/union correspondence mapping (described below), for FWD
+ * resolution it's not important, because by the time that FWD type (a reference
+ * type) is deduplicated, all structs/unions will have been deduped already.
+ *
+ * Recording STRUCT/UNION mapping is purely a performance optimization and is
+ * not required for correctness. It needs to be done carefully to ensure that
+ * struct/union from candidate's type graph is not mapped into corresponding
+ * struct/union from canonical type graph that itself hasn't been resolved into
+ * canonical representative. The only guarantee we have is that canonical
+ * struct/union was determined as canonical and that won't change. But any
+ * types referenced through that struct/union's fields might not be resolved
+ * yet, so in a case like that it's too early to establish any kind of
+ * correspondence between structs/unions.
+ *
+ * No canonical correspondence is derived for primitive types (they are
+ * already completely deduplicated anyway) or reference types (they rely on
+ * stability of struct/union canonical relationship for equivalence checks).
+ */
+static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
+{
+ __u32 cand_type_id, targ_type_id;
+ __u16 t_kind, c_kind;
+ __u32 t_id, c_id;
+ int i;
+
+ for (i = 0; i < d->hypot_cnt; i++) {
+ cand_type_id = d->hypot_list[i];
+ targ_type_id = d->hypot_map[cand_type_id];
+ t_id = resolve_type_id(d, targ_type_id);
+ c_id = resolve_type_id(d, cand_type_id);
+ t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
+ c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
+ /*
+ * Resolve FWD into STRUCT/UNION.
+ * It's ok to resolve FWD into STRUCT/UNION that's not yet
+ * mapped to canonical representative (as opposed to
+ * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
+ * eventually that struct is going to be mapped and all resolved
+ * FWDs will automatically resolve to correct canonical
+ * representative. This will happen before ref type deduping,
+ * which critically depends on the stability of these mappings. This
+ * stability is not a requirement for STRUCT/UNION equivalence
+ * checks, though.
+ */
+ if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
+ d->map[c_id] = t_id;
+ else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
+ d->map[t_id] = c_id;
+
+ if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
+ c_kind != BTF_KIND_FWD &&
+ is_type_mapped(d, c_id) &&
+ !is_type_mapped(d, t_id)) {
+ /*
+ * as a perf optimization, we can map struct/union
+ * that's part of type graph we just verified for
+ * equivalence. We can do that for struct/union that has
+ * canonical representative only, though.
+ */
+ d->map[t_id] = c_id;
+ }
+ }
+}
+
+/*
+ * Deduplicate struct/union types.
+ *
+ * For each struct/union type its type signature hash is calculated, taking
+ * into account the type's name, size, and the number, order and names of its
+ * fields, but ignoring type IDs referenced from fields, because they might not
+ * be deduped completely until after the reference types deduplication phase.
+ * This type hash is used to iterate over all potential canonical types sharing
+ * the same hash.
+ * For each canonical candidate we check whether type graphs that they form
+ * (through referenced types in fields and so on) are equivalent using algorithm
+ * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
+ * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
+ * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
+ * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
+ * potentially map other structs/unions to their canonical representatives,
+ * if such a relationship hasn't yet been established. This speeds up the
+ * algorithm by eliminating some of the duplicate work.
+ *
+ * If no matching canonical representative was found, struct/union is marked
+ * as canonical for itself and is added into btf_dedup->dedup_table hash map
+ * for further look ups.
+ */
+static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *cand_type, *t;
+ /* if we don't find equivalent type, then we are canonical */
+ __u32 new_id = type_id;
+ __u16 kind;
+ __u32 h;
+
+ /* already deduped or is in process of deduping (loop detected) */
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return 0;
+
+ t = d->btf->types[type_id];
+ kind = BTF_INFO_KIND(t->info);
+
+ if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ return 0;
+
+ h = btf_hash_struct(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ int eq;
+
+ /*
+ * Even though btf_dedup_is_equiv() checks for
+ * btf_shallow_equal_struct() internally when checking two
+ * structs (unions) for equivalence, we need to guard here
+ * from picking matching FWD type as a dedup candidate.
+ * This can happen due to hash collision. In such case just
+ * relying on btf_dedup_is_equiv() would lead to potentially
+ * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
+ * FWD and compatible STRUCT/UNION are considered equivalent.
+ */
+ cand_type = d->btf->types[cand_node->type_id];
+ if (!btf_shallow_equal_struct(t, cand_type))
+ continue;
+
+ btf_dedup_clear_hypot_map(d);
+ eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+ if (eq < 0)
+ return eq;
+ if (!eq)
+ continue;
+ new_id = cand_node->type_id;
+ btf_dedup_merge_hypot_map(d);
+ break;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_dedup_struct_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_struct_type(d, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * Deduplicate reference type.
+ *
+ * Once all primitive and struct/union types are deduplicated, we can easily
+ * deduplicate all other (reference) BTF types. This is done in two steps:
+ *
+ * 1. Resolve all referenced type IDs into their canonical type IDs. This
+ * resolution can be done either immediately for primitive or struct/union types
+ * (because they were deduped in previous two phases) or recursively for
+ * reference types. Recursion will always terminate at either primitive or
+ * struct/union type, at which point we can "unwind" chain of reference types
+ * one by one. There is no danger of encountering cycles because in C type
+ * system the only way to form type cycle is through struct/union, so any chain
+ * of reference types, even those taking part in a type cycle, will inevitably
+ * reach struct/union at some point.
+ *
+ * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
+ * becomes "stable", in the sense that no further deduplication will cause
+ * any changes to it. With that, it's now possible to calculate type's signature
+ * hash (this time taking into account referenced type IDs) and loop over all
+ * potential canonical representatives. If no match was found, current type
+ * will become canonical representative of itself and will be added into
+ * btf_dedup->dedup_table as another possible canonical representative.
+ */
+static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_dedup_node *cand_node;
+ struct btf_type *t, *cand;
+ /* if we don't find equivalent type, then we are representative type */
+ __u32 new_id = type_id;
+ int ref_type_id;
+ __u32 h;
+
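+ /* a cycle made purely of reference types means malformed BTF */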
+ if (d->map[type_id] == BTF_IN_PROGRESS_ID)
+ return -ELOOP;
+ if (d->map[type_id] <= BTF_MAX_NR_TYPES)
+ return resolve_type_id(d, type_id);
+
+ t = d->btf->types[type_id];
+ d->map[type_id] = BTF_IN_PROGRESS_ID;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ h = btf_hash_common(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_common(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *info = (struct btf_array *)(t + 1);
+
+ ref_type_id = btf_dedup_ref_type(d, info->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->type = ref_type_id;
+
+ ref_type_id = btf_dedup_ref_type(d, info->index_type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ info->index_type = ref_type_id;
+
+ h = btf_hash_array(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_array(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param;
+ __u16 vlen;
+ int i;
+
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ param = (struct btf_param *)(t + 1);
+ for (i = 0; i < vlen; i++) {
+ ref_type_id = btf_dedup_ref_type(d, param->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ param->type = ref_type_id;
+ param++;
+ }
+
+ h = btf_hash_fnproto(t);
+ for_each_dedup_cand(d, h, cand_node) {
+ cand = d->btf->types[cand_node->type_id];
+ if (btf_equal_fnproto(t, cand)) {
+ new_id = cand_node->type_id;
+ break;
+ }
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ d->map[type_id] = new_id;
+ if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+
+ return new_id;
+}
+
+static int btf_dedup_ref_types(struct btf_dedup *d)
+{
+ int i, err;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ err = btf_dedup_ref_type(d, i);
+ if (err < 0)
+ return err;
+ }
+ btf_dedup_table_free(d);
+ return 0;
+}
+
+/*
+ * Compact types.
+ *
+ * After we established for each type its corresponding canonical representative
+ * type, we can now eliminate types that are not canonical and leave only
+ * canonical ones laid out sequentially in memory by copying them over
+ * duplicates. During compaction btf_dedup->hypot_map array is reused to store
+ * a map from original type ID to a new compacted type ID, which will be used
+ * during the next phase to "fix up" type IDs referenced from struct/union and
+ * reference types.
+ */
+static int btf_dedup_compact_types(struct btf_dedup *d)
+{
+ struct btf_type **new_types;
+ __u32 next_type_id = 1;
+ char *types_start, *p;
+ int i, len;
+
+ /* we are going to reuse hypot_map to store compaction remapping */
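+ /* type ID 0 is reserved for void and always maps to itself */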
+ d->hypot_map[0] = 0;
+ for (i = 1; i <= d->btf->nr_types; i++)
+ d->hypot_map[i] = BTF_UNPROCESSED_ID;
+
+ types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
+ p = types_start;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
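+ /* only canonical types (mapped to themselves) survive compaction */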
+ if (d->map[i] != i)
+ continue;
+
+ len = btf_type_size(d->btf->types[i]);
+ if (len < 0)
+ return len;
+
+ memmove(p, d->btf->types[i], len);
+ d->hypot_map[i] = next_type_id;
+ d->btf->types[next_type_id] = (struct btf_type *)p;
+ p += len;
+ next_type_id++;
+ }
+
+ /* shrink struct btf's internal types index and update btf_header */
+ d->btf->nr_types = next_type_id - 1;
+ d->btf->types_size = d->btf->nr_types;
+ d->btf->hdr->type_len = p - types_start;
+ new_types = realloc(d->btf->types,
+ (1 + d->btf->nr_types) * sizeof(struct btf_type *));
+ if (!new_types)
+ return -ENOMEM;
+ d->btf->types = new_types;
+
+ /* make sure string section follows type information without gaps */
+ d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
+ memmove(p, d->btf->strings, d->btf->hdr->str_len);
+ d->btf->strings = p;
+ p += d->btf->hdr->str_len;
+
+ d->btf->data_size = p - (char *)d->btf->data;
+ return 0;
+}
+
+/*
+ * Figure out final (deduplicated and compacted) type ID for provided original
+ * `type_id` by first resolving it into corresponding canonical type ID and
+ * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
+ * which is populated during compaction phase.
+ */
+static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
+{
+ __u32 resolved_type_id, new_type_id;
+
+ resolved_type_id = resolve_type_id(d, type_id);
+ new_type_id = d->hypot_map[resolved_type_id];
+ if (new_type_id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+ return new_type_id;
+}
+
+/*
+ * Remap referenced type IDs into deduped type IDs.
+ *
+ * After BTF types are deduplicated and compacted, their final type IDs may
+ * differ from original ones. The map from original to a corresponding
+ * deduped type ID is stored in btf_dedup->hypot_map and is populated during
+ * compaction phase. During remapping phase we are rewriting all type IDs
+ * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
+ * their final deduped type IDs.
+ */
+static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
+{
+ struct btf_type *t = d->btf->types[type_id];
+ int i, r;
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_ENUM:
+ break;
+
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+ break;
+
+ case BTF_KIND_ARRAY: {
+ struct btf_array *arr_info = (struct btf_array *)(t + 1);
+
+ r = btf_dedup_remap_type_id(d, arr_info->type);
+ if (r < 0)
+ return r;
+ arr_info->type = r;
+ r = btf_dedup_remap_type_id(d, arr_info->index_type);
+ if (r < 0)
+ return r;
+ arr_info->index_type = r;
+ break;
+ }
+
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *member = (struct btf_member *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, member->type);
+ if (r < 0)
+ return r;
+ member->type = r;
+ member++;
+ }
+ break;
+ }
+
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *param = (struct btf_param *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ r = btf_dedup_remap_type_id(d, t->type);
+ if (r < 0)
+ return r;
+ t->type = r;
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, param->type);
+ if (r < 0)
+ return r;
+ param->type = r;
+ param++;
+ }
+ break;
+ }
+
+ case BTF_KIND_DATASEC: {
+ struct btf_var_secinfo *var = (struct btf_var_secinfo *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+
+ for (i = 0; i < vlen; i++) {
+ r = btf_dedup_remap_type_id(d, var->type);
+ if (r < 0)
+ return r;
+ var->type = r;
+ var++;
+ }
+ break;
+ }
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dedup_remap_types(struct btf_dedup *d)
+{
+ int i, r;
+
+ for (i = 1; i <= d->btf->nr_types; i++) {
+ r = btf_dedup_remap_type(d, i);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b0610dcdae6b..c7b399e81fce 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -21,6 +21,8 @@ struct btf;
struct btf_ext;
struct btf_type;
+struct bpf_object;
+
/*
* The .BTF.ext ELF section layout defined as
* struct btf_ext_header
@@ -55,33 +57,48 @@ struct btf_ext_header {
__u32 line_info_len;
};
-typedef int (*btf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
-
LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
+LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
+LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
+ __u32 expected_key_size,
+ __u32 expected_value_size,
+ __u32 *key_type_id, __u32 *value_type_id);
+
+LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
+LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
+ __u32 *size);
+LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
+LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+
+struct btf_dedup_opts {
+ unsigned int dedup_table_size;
+ bool dont_resolve_fwds;
+};
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
-void btf_ext__free(struct btf_ext *btf_ext);
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *func_info_len);
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
+ const struct btf_dedup_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 169e347c76f6..151f7ac1882e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -7,6 +7,7 @@
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
* Copyright (C) 2015 Huawei Inc.
* Copyright (C) 2017 Nicira, Inc.
+ * Copyright (C) 2019 Isovalent, Inc.
*/
#ifndef _GNU_SOURCE
@@ -42,6 +43,7 @@
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
+#include "libbpf_internal.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -51,41 +53,40 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+/* vfprintf() in __base_pr() uses a nonliteral format string. It may break
+ * compilation if the user enables the corresponding warning. Disable it
+ * explicitly.
+ */
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+
#define __printf(a, b) __attribute__((format(printf, a, b)))
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level, const char *format,
+ va_list args)
{
- va_list args;
- int err;
+ if (level == LIBBPF_DEBUG)
+ return 0;
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
-static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
-static __printf(1, 2) libbpf_print_fn_t __pr_debug;
-
-#define __pr(func, fmt, ...) \
-do { \
- if ((func)) \
- (func)("libbpf: " fmt, ##__VA_ARGS__); \
-} while (0)
+static libbpf_print_fn_t __libbpf_pr = __base_pr;
-#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
-#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
+void libbpf_set_print(libbpf_print_fn_t fn)
+{
+ __libbpf_pr = fn;
+}
-void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug)
+__printf(2, 3)
+void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
- __pr_warning = warn;
- __pr_info = info;
- __pr_debug = debug;
+ va_list args;
+
+ if (!__libbpf_pr)
+ return;
+
+ va_start(args, format);
+ __libbpf_pr(level, format, args);
+ va_end(args);
}
#define STRERR_BUFSIZE 128
@@ -117,9 +118,20 @@ void libbpf_set_print(libbpf_print_fn_t warn,
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
struct bpf_capabilities {
/* v4.14: kernel support for program & map names. */
__u32 name:1;
+ /* v5.2: kernel support for global data sections. */
+ __u32 global_data:1;
+ /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
+ __u32 btf_func:1;
+ /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
+ __u32 btf_datasec:1;
};
/*
@@ -144,6 +156,7 @@ struct bpf_program {
enum {
RELO_LD64,
RELO_CALL,
+ RELO_DATA,
} type;
int insn_idx;
union {
@@ -152,6 +165,7 @@ struct bpf_program {
};
} *reloc_desc;
int nr_reloc;
+ int log_level;
struct {
int nr;
@@ -176,6 +190,19 @@ struct bpf_program {
__u32 line_info_cnt;
};
+enum libbpf_map_type {
+ LIBBPF_MAP_UNSPEC,
+ LIBBPF_MAP_DATA,
+ LIBBPF_MAP_BSS,
+ LIBBPF_MAP_RODATA,
+};
+
+static const char * const libbpf_type_to_btf_name[] = {
+ [LIBBPF_MAP_DATA] = ".data",
+ [LIBBPF_MAP_BSS] = ".bss",
+ [LIBBPF_MAP_RODATA] = ".rodata",
+};
+
struct bpf_map {
int fd;
char *name;
@@ -187,11 +214,18 @@ struct bpf_map {
__u32 btf_value_type_id;
void *priv;
bpf_map_clear_priv_t clear_priv;
+ enum libbpf_map_type libbpf_type;
+};
+
+struct bpf_secdata {
+ void *rodata;
+ void *data;
};
static LIST_HEAD(bpf_objects_list);
struct bpf_object {
+ char name[BPF_OBJ_NAME_LEN];
char license[64];
__u32 kern_version;
@@ -199,6 +233,7 @@ struct bpf_object {
size_t nr_programs;
struct bpf_map *maps;
size_t nr_maps;
+ struct bpf_secdata sections;
bool loaded;
bool has_pseudo_calls;
@@ -214,6 +249,9 @@ struct bpf_object {
Elf *elf;
GElf_Ehdr ehdr;
Elf_Data *symbols;
+ Elf_Data *data;
+ Elf_Data *rodata;
+ Elf_Data *bss;
size_t strtabidx;
struct {
GElf_Shdr shdr;
@@ -222,6 +260,9 @@ struct bpf_object {
int nr_reloc;
int maps_shndx;
int text_shndx;
+ int data_shndx;
+ int rodata_shndx;
+ int bss_shndx;
} efile;
/*
* All loaded bpf_object is linked in a list, which is
@@ -312,7 +353,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
return -EINVAL;
}
- bzero(prog, sizeof(*prog));
+ memset(prog, 0, sizeof(*prog));
prog->section_name = strdup(section_name);
if (!prog->section_name) {
@@ -443,6 +484,7 @@ static struct bpf_object *bpf_object__new(const char *path,
size_t obj_buf_sz)
{
struct bpf_object *obj;
+ char *end;
obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
if (!obj) {
@@ -451,8 +493,14 @@ static struct bpf_object *bpf_object__new(const char *path,
}
strcpy(obj->path, path);
- obj->efile.fd = -1;
+ /* Using basename() GNU version which doesn't modify arg. */
+ strncpy(obj->name, basename((void *)path),
+ sizeof(obj->name) - 1);
+ end = strchr(obj->name, '.');
+ if (end)
+ *end = 0;
+ obj->efile.fd = -1;
/*
* Caller of this function should also calls
* bpf_object__elf_finish() after data collection to return
@@ -462,6 +510,9 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1;
+ obj->efile.data_shndx = -1;
+ obj->efile.rodata_shndx = -1;
+ obj->efile.bss_shndx = -1;
obj->loaded = false;
@@ -480,6 +531,9 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.elf = NULL;
}
obj->efile.symbols = NULL;
+ obj->efile.data = NULL;
+ obj->efile.rodata = NULL;
+ obj->efile.bss = NULL;
zfree(&obj->efile.reloc);
obj->efile.nr_reloc = 0;
@@ -621,27 +675,182 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
return false;
}
+static int bpf_object_search_section_size(const struct bpf_object *obj,
+ const char *name, size_t *d_size)
+{
+ const GElf_Ehdr *ep = &obj->efile.ehdr;
+ Elf *elf = obj->efile.elf;
+ Elf_Scn *scn = NULL;
+ int idx = 0;
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ const char *sec_name;
+ Elf_Data *data;
+ GElf_Shdr sh;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ pr_warning("failed to get section(%d) header from %s\n",
+ idx, obj->path);
+ return -EIO;
+ }
+
+ sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
+ if (!sec_name) {
+ pr_warning("failed to get section(%d) name from %s\n",
+ idx, obj->path);
+ return -EIO;
+ }
+
+ if (strcmp(name, sec_name))
+ continue;
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_warning("failed to get section(%d) data from %s(%s)\n",
+ idx, name, obj->path);
+ return -EIO;
+ }
+
+ *d_size = data->d_size;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+ __u32 *size)
+{
+ int ret = -ENOENT;
+ size_t d_size;
+
+ *size = 0;
+ if (!name) {
+ return -EINVAL;
+ } else if (!strcmp(name, ".data")) {
+ if (obj->efile.data)
+ *size = obj->efile.data->d_size;
+ } else if (!strcmp(name, ".bss")) {
+ if (obj->efile.bss)
+ *size = obj->efile.bss->d_size;
+ } else if (!strcmp(name, ".rodata")) {
+ if (obj->efile.rodata)
+ *size = obj->efile.rodata->d_size;
+ } else {
+ ret = bpf_object_search_section_size(obj, name, &d_size);
+ if (!ret)
+ *size = d_size;
+ }
+
+ return *size ? 0 : ret;
+}
+
+int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+ __u32 *off)
+{
+ Elf_Data *symbols = obj->efile.symbols;
+ const char *sname;
+ size_t si;
+
+ if (!name || !off)
+ return -EINVAL;
+
+ for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
+ GElf_Sym sym;
+
+ if (!gelf_getsym(symbols, si, &sym))
+ continue;
+ if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
+ GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
+ continue;
+
+ sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name);
+ if (!sname) {
+ pr_warning("failed to get sym name string for var %s\n",
+ name);
+ return -EIO;
+ }
+ if (strcmp(name, sname) == 0) {
+ *off = sym.st_value;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static bool bpf_object__has_maps(const struct bpf_object *obj)
+{
+ return obj->efile.maps_shndx >= 0 ||
+ obj->efile.data_shndx >= 0 ||
+ obj->efile.rodata_shndx >= 0 ||
+ obj->efile.bss_shndx >= 0;
+}
+
+static int
+bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
+ enum libbpf_map_type type, Elf_Data *data,
+ void **data_buff)
+{
+ struct bpf_map_def *def = &map->def;
+ char map_name[BPF_OBJ_NAME_LEN];
+
+ map->libbpf_type = type;
+ map->offset = ~(typeof(map->offset))0;
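+ /* BPF_OBJ_NAME_LEN is 16 incl. NUL: 8 chars of object name + 7 of suffix */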
+ snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
+ libbpf_type_to_btf_name[type]);
+ map->name = strdup(map_name);
+ if (!map->name) {
+ pr_warning("failed to alloc map name\n");
+ return -ENOMEM;
+ }
+
+ def->type = BPF_MAP_TYPE_ARRAY;
+ def->key_size = sizeof(int);
+ def->value_size = data->d_size;
+ def->max_entries = 1;
+ def->map_flags = type == LIBBPF_MAP_RODATA ?
+ BPF_F_RDONLY_PROG : 0;
+ if (data_buff) {
+ *data_buff = malloc(data->d_size);
+ if (!*data_buff) {
+ zfree(&map->name);
+ pr_warning("failed to alloc map content buffer\n");
+ return -ENOMEM;
+ }
+ memcpy(*data_buff, data->d_buf, data->d_size);
+ }
+
+ pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
+ return 0;
+}
+
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
+ int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
bool strict = !(flags & MAPS_RELAX_COMPAT);
- int i, map_idx, map_def_sz, nr_maps = 0;
- Elf_Scn *scn;
- Elf_Data *data;
Elf_Data *symbols = obj->efile.symbols;
+ Elf_Data *data = NULL;
+ int ret = 0;
- if (obj->efile.maps_shndx < 0)
- return -EINVAL;
if (!symbols)
return -EINVAL;
+ nr_syms = symbols->d_size / sizeof(GElf_Sym);
- scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
- if (!scn || !data) {
- pr_warning("failed to get Elf_Data from map section %d\n",
- obj->efile.maps_shndx);
- return -EINVAL;
+ if (obj->efile.maps_shndx >= 0) {
+ Elf_Scn *scn = elf_getscn(obj->efile.elf,
+ obj->efile.maps_shndx);
+
+ if (scn)
+ data = elf_getdata(scn, NULL);
+ if (!scn || !data) {
+ pr_warning("failed to get Elf_Data from map section %d\n",
+ obj->efile.maps_shndx);
+ return -EINVAL;
+ }
}
/*
@@ -651,7 +860,16 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
*
* TODO: Detect array of map and report error.
*/
- for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+ if (obj->caps.global_data) {
+ if (obj->efile.data_shndx >= 0)
+ nr_maps_glob++;
+ if (obj->efile.rodata_shndx >= 0)
+ nr_maps_glob++;
+ if (obj->efile.bss_shndx >= 0)
+ nr_maps_glob++;
+ }
+
+ for (i = 0; data && i < nr_syms; i++) {
GElf_Sym sym;
if (!gelf_getsym(symbols, i, &sym))
@@ -661,22 +879,24 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
nr_maps++;
}
- /* Alloc obj->maps and fill nr_maps. */
- pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
- nr_maps, data->d_size);
-
- if (!nr_maps)
+ if (!nr_maps && !nr_maps_glob)
return 0;
/* Assume equally sized map definitions */
- map_def_sz = data->d_size / nr_maps;
- if (!data->d_size || (data->d_size % nr_maps) != 0) {
- pr_warning("unable to determine map definition size "
- "section %s, %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
- return -EINVAL;
+ if (data) {
+ pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
+ nr_maps, data->d_size);
+
+ map_def_sz = data->d_size / nr_maps;
+ if (!data->d_size || (data->d_size % nr_maps) != 0) {
+ pr_warning("unable to determine map definition size "
+ "section %s, %d maps in %zd bytes\n",
+ obj->path, nr_maps, data->d_size);
+ return -EINVAL;
+ }
}
+ nr_maps += nr_maps_glob;
obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
if (!obj->maps) {
pr_warning("alloc maps for object failed\n");
@@ -697,7 +917,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
/*
* Fill obj->maps using data in "maps" section.
*/
- for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+ for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
GElf_Sym sym;
const char *map_name;
struct bpf_map_def *def;
@@ -710,6 +930,8 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
map_name = elf_strptr(obj->efile.elf,
obj->efile.strtabidx,
sym.st_name);
+
+ obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
obj->maps[map_idx].offset = sym.st_value;
if (sym.st_value + map_def_sz > data->d_size) {
pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
@@ -758,8 +980,31 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
map_idx++;
}
- qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
- return 0;
+ if (!obj->caps.global_data)
+ goto finalize;
+
+ /*
+ * Populate rest of obj->maps with libbpf internal maps.
+ */
+ if (obj->efile.data_shndx >= 0)
+ ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+ LIBBPF_MAP_DATA,
+ obj->efile.data,
+ &obj->sections.data);
+ if (!ret && obj->efile.rodata_shndx >= 0)
+ ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+ LIBBPF_MAP_RODATA,
+ obj->efile.rodata,
+ &obj->sections.rodata);
+ if (!ret && obj->efile.bss_shndx >= 0)
+ ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
+ LIBBPF_MAP_BSS,
+ obj->efile.bss, NULL);
+finalize:
+ if (!ret)
+ qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
+ compare_bpf_map);
+ return ret;
}
static bool section_have_execinstr(struct bpf_object *obj, int idx)
@@ -780,11 +1025,80 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return false;
}
+static void bpf_object__sanitize_btf(struct bpf_object *obj)
+{
+ bool has_datasec = obj->caps.btf_datasec;
+ bool has_func = obj->caps.btf_func;
+ struct btf *btf = obj->btf;
+ struct btf_type *t;
+ int i, j, vlen;
+ __u16 kind;
+
+ if (!obj->btf || (has_func && has_datasec))
+ return;
+
+ for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ t = (struct btf_type *)btf__type_by_id(btf, i);
+ kind = BTF_INFO_KIND(t->info);
+
+ if (!has_datasec && kind == BTF_KIND_VAR) {
+ /* replace VAR with INT */
+ t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
+ t->size = sizeof(int);
+ *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
+ } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
+ /* replace DATASEC with STRUCT */
+ struct btf_var_secinfo *v = (void *)(t + 1);
+ struct btf_member *m = (void *)(t + 1);
+ struct btf_type *vt;
+ char *name;
+
+ name = (char *)btf__name_by_offset(btf, t->name_off);
+ while (*name) {
+ if (*name == '.')
+ *name = '_';
+ name++;
+ }
+
+ vlen = BTF_INFO_VLEN(t->info);
+ t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
+ for (j = 0; j < vlen; j++, v++, m++) {
+ /* order of field assignments is important */
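+ /* btf_member offset is in bits, btf_var_secinfo offset is in bytes */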
+ m->offset = v->offset * 8;
+ m->type = v->type;
+ /* preserve variable name as member name */
+ vt = (void *)btf__type_by_id(btf, v->type);
+ m->name_off = vt->name_off;
+ }
+ } else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
+ /* replace FUNC_PROTO with ENUM */
+ vlen = BTF_INFO_VLEN(t->info);
+ t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
+ t->size = sizeof(__u32); /* kernel enforced */
+ } else if (!has_func && kind == BTF_KIND_FUNC) {
+ /* replace FUNC with TYPEDEF */
+ t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
+ }
+ }
+}
+
+static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
+{
+ if (!obj->btf_ext)
+ return;
+
+ if (!obj->caps.btf_func) {
+ btf_ext__free(obj->btf_ext);
+ obj->btf_ext = NULL;
+ }
+}
+
static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Data *btf_ext_data = NULL;
+ Elf_Data *btf_data = NULL;
Elf_Scn *scn = NULL;
int idx = 0, err = 0;
@@ -828,24 +1142,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
(int)sh.sh_link, (unsigned long)sh.sh_flags,
(int)sh.sh_type);
- if (strcmp(name, "license") == 0)
+ if (strcmp(name, "license") == 0) {
err = bpf_object__init_license(obj,
data->d_buf,
data->d_size);
- else if (strcmp(name, "version") == 0)
+ } else if (strcmp(name, "version") == 0) {
err = bpf_object__init_kversion(obj,
data->d_buf,
data->d_size);
- else if (strcmp(name, "maps") == 0)
+ } else if (strcmp(name, "maps") == 0) {
obj->efile.maps_shndx = idx;
- else if (strcmp(name, BTF_ELF_SEC) == 0) {
- obj->btf = btf__new(data->d_buf, data->d_size,
- __pr_debug);
- if (IS_ERR(obj->btf)) {
- pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_ELF_SEC, PTR_ERR(obj->btf));
- obj->btf = NULL;
- }
+ } else if (strcmp(name, BTF_ELF_SEC) == 0) {
+ btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
@@ -857,20 +1165,28 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->efile.symbols = data;
obj->efile.strtabidx = sh.sh_link;
}
- } else if ((sh.sh_type == SHT_PROGBITS) &&
- (sh.sh_flags & SHF_EXECINSTR) &&
- (data->d_size > 0)) {
- if (strcmp(name, ".text") == 0)
- obj->efile.text_shndx = idx;
- err = bpf_object__add_program(obj, data->d_buf,
- data->d_size, name, idx);
- if (err) {
- char errmsg[STRERR_BUFSIZE];
- char *cp = libbpf_strerror_r(-err, errmsg,
- sizeof(errmsg));
-
- pr_warning("failed to alloc program %s (%s): %s",
- name, obj->path, cp);
+ } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
+ if (sh.sh_flags & SHF_EXECINSTR) {
+ if (strcmp(name, ".text") == 0)
+ obj->efile.text_shndx = idx;
+ err = bpf_object__add_program(obj, data->d_buf,
+ data->d_size, name, idx);
+ if (err) {
+ char errmsg[STRERR_BUFSIZE];
+ char *cp = libbpf_strerror_r(-err, errmsg,
+ sizeof(errmsg));
+
+ pr_warning("failed to alloc program %s (%s): %s",
+ name, obj->path, cp);
+ }
+ } else if (strcmp(name, ".data") == 0) {
+ obj->efile.data = data;
+ obj->efile.data_shndx = idx;
+ } else if (strcmp(name, ".rodata") == 0) {
+ obj->efile.rodata = data;
+ obj->efile.rodata_shndx = idx;
+ } else {
+ pr_debug("skip section(%d) %s\n", idx, name);
}
} else if (sh.sh_type == SHT_REL) {
void *reloc = obj->efile.reloc;
@@ -898,6 +1214,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
obj->efile.reloc[n].shdr = sh;
obj->efile.reloc[n].data = data;
}
+ } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
+ obj->efile.bss = data;
+ obj->efile.bss_shndx = idx;
} else {
pr_debug("skip section(%d) %s\n", idx, name);
}
@@ -909,23 +1228,45 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_warning("Corrupted ELF file: index of strtab invalid\n");
return LIBBPF_ERRNO__FORMAT;
}
+ if (btf_data) {
+ obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
+ if (IS_ERR(obj->btf)) {
+ pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
+ BTF_ELF_SEC, PTR_ERR(obj->btf));
+ obj->btf = NULL;
+ } else {
+ err = btf__finalize_data(obj, obj->btf);
+ if (!err) {
+ bpf_object__sanitize_btf(obj);
+ err = btf__load(obj->btf);
+ }
+ if (err) {
+ pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
+ BTF_ELF_SEC, err);
+ btf__free(obj->btf);
+ obj->btf = NULL;
+ err = 0;
+ }
+ }
+ }
if (btf_ext_data) {
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
} else {
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size,
- __pr_debug);
+ btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
+ } else {
+ bpf_object__sanitize_btf_ext(obj);
}
}
}
- if (obj->efile.maps_shndx >= 0) {
+ if (bpf_object__has_maps(obj)) {
err = bpf_object__init_maps(obj, flags);
if (err)
goto out;
@@ -961,13 +1302,46 @@ bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
return NULL;
}
+static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
+ int shndx)
+{
+ return shndx == obj->efile.data_shndx ||
+ shndx == obj->efile.bss_shndx ||
+ shndx == obj->efile.rodata_shndx;
+}
+
+static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
+ int shndx)
+{
+ return shndx == obj->efile.maps_shndx;
+}
+
+static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
+ int shndx)
+{
+ return shndx == obj->efile.text_shndx ||
+ bpf_object__shndx_is_maps(obj, shndx) ||
+ bpf_object__shndx_is_data(obj, shndx);
+}
+
+static enum libbpf_map_type
+bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
+{
+ if (shndx == obj->efile.data_shndx)
+ return LIBBPF_MAP_DATA;
+ else if (shndx == obj->efile.bss_shndx)
+ return LIBBPF_MAP_BSS;
+ else if (shndx == obj->efile.rodata_shndx)
+ return LIBBPF_MAP_RODATA;
+ else
+ return LIBBPF_MAP_UNSPEC;
+}
+
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
Elf_Data *data, struct bpf_object *obj)
{
Elf_Data *symbols = obj->efile.symbols;
- int text_shndx = obj->efile.text_shndx;
- int maps_shndx = obj->efile.maps_shndx;
struct bpf_map *maps = obj->maps;
size_t nr_maps = obj->nr_maps;
int i, nrels;
@@ -987,7 +1361,10 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
GElf_Sym sym;
GElf_Rel rel;
unsigned int insn_idx;
+ unsigned int shdr_idx;
struct bpf_insn *insns = prog->insns;
+ enum libbpf_map_type type;
+ const char *name;
size_t map_idx;
if (!gelf_getrel(data, i, &rel)) {
@@ -1002,13 +1379,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- pr_debug("relo for %lld value %lld name %d\n",
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name) ? : "<?>";
+
+ pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
(long long) (rel.r_info >> 32),
- (long long) sym.st_value, sym.st_name);
+ (long long) sym.st_value, sym.st_name, name);
- if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
- pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
- prog->section_name, sym.st_shndx);
+ shdr_idx = sym.st_shndx;
+ if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
+ pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
+ prog->section_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
@@ -1033,97 +1415,73 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
return -LIBBPF_ERRNO__RELOC;
}
- /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
- for (map_idx = 0; map_idx < nr_maps; map_idx++) {
- if (maps[map_idx].offset == sym.st_value) {
- pr_debug("relocation: find map %zd (%s) for insn %u\n",
- map_idx, maps[map_idx].name, insn_idx);
- break;
+ if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
+ bpf_object__shndx_is_data(obj, shdr_idx)) {
+ type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+ if (type != LIBBPF_MAP_UNSPEC) {
+ if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
+ pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
+ name, insn_idx, insns[insn_idx].code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (!obj->caps.global_data) {
+ pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
+ name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
}
- }
- if (map_idx >= nr_maps) {
- pr_warning("bpf relocation: map_idx %d large than %d\n",
- (int)map_idx, (int)nr_maps - 1);
- return -LIBBPF_ERRNO__RELOC;
- }
+ for (map_idx = 0; map_idx < nr_maps; map_idx++) {
+ if (maps[map_idx].libbpf_type != type)
+ continue;
+ if (type != LIBBPF_MAP_UNSPEC ||
+ (type == LIBBPF_MAP_UNSPEC &&
+ maps[map_idx].offset == sym.st_value)) {
+ pr_debug("relocation: find map %zd (%s) for insn %u\n",
+ map_idx, maps[map_idx].name, insn_idx);
+ break;
+ }
+ }
+
+ if (map_idx >= nr_maps) {
+				pr_warning("bpf relocation: map_idx %d larger than %d\n",
+ (int)map_idx, (int)nr_maps - 1);
+ return -LIBBPF_ERRNO__RELOC;
+ }
- prog->reloc_desc[i].type = RELO_LD64;
- prog->reloc_desc[i].insn_idx = insn_idx;
- prog->reloc_desc[i].map_idx = map_idx;
+ prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
+ RELO_DATA : RELO_LD64;
+ prog->reloc_desc[i].insn_idx = insn_idx;
+ prog->reloc_desc[i].map_idx = map_idx;
+ }
}
return 0;
}
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
{
- const struct btf_type *container_type;
- const struct btf_member *key, *value;
struct bpf_map_def *def = &map->def;
- const size_t max_name = 256;
- char container_name[max_name];
- __s64 key_size, value_size;
- __s32 container_id;
-
- if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
- max_name) {
- pr_warning("map:%s length of '____btf_map_%s' is too long\n",
- map->name, map->name);
- return -EINVAL;
- }
-
- container_id = btf__find_by_name(btf, container_name);
- if (container_id < 0) {
- pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
- map->name, container_name);
- return container_id;
- }
-
- container_type = btf__type_by_id(btf, container_id);
- if (!container_type) {
- pr_warning("map:%s cannot find BTF type for container_id:%u\n",
- map->name, container_id);
- return -EINVAL;
- }
-
- if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
- BTF_INFO_VLEN(container_type->info) < 2) {
- pr_warning("map:%s container_name:%s is an invalid container struct\n",
- map->name, container_name);
- return -EINVAL;
- }
-
- key = (struct btf_member *)(container_type + 1);
- value = key + 1;
-
- key_size = btf__resolve_size(btf, key->type);
- if (key_size < 0) {
- pr_warning("map:%s invalid BTF key_type_size\n",
- map->name);
- return key_size;
- }
-
- if (def->key_size != key_size) {
- pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map->name, (__u32)key_size, def->key_size);
- return -EINVAL;
- }
-
- value_size = btf__resolve_size(btf, value->type);
- if (value_size < 0) {
- pr_warning("map:%s invalid BTF value_type_size\n", map->name);
- return value_size;
- }
+ __u32 key_type_id = 0, value_type_id = 0;
+ int ret;
- if (def->value_size != value_size) {
- pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map->name, (__u32)value_size, def->value_size);
- return -EINVAL;
+ if (!bpf_map__is_internal(map)) {
+ ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
+ def->value_size, &key_type_id,
+ &value_type_id);
+ } else {
+ /*
+ * LLVM annotates global data differently in BTF, that is,
+ * only as '.data', '.bss' or '.rodata'.
+ */
+ ret = btf__find_by_name(btf,
+ libbpf_type_to_btf_name[map->libbpf_type]);
}
+ if (ret < 0)
+ return ret;
- map->btf_key_type_id = key->type;
- map->btf_value_type_id = value->type;
-
+ map->btf_key_type_id = key_type_id;
+ map->btf_value_type_id = bpf_map__is_internal(map) ?
+ ret : value_type_id;
return 0;
}
@@ -1174,6 +1532,20 @@ err_free_new_name:
return -errno;
}
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+ if (!map || !max_entries)
+ return -EINVAL;
+
+ /* If map already created, its attributes can't be changed. */
+ if (map->fd >= 0)
+ return -EBUSY;
+
+ map->def.max_entries = max_entries;
+
+ return 0;
+}
+
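As a usage sketch for this new setter (the object path and map name are illustrative assumptions), the resize must happen between open and load:

	struct bpf_object *obj = bpf_object__open("prog.o");	/* hypothetical file */
	struct bpf_map *map;

	/* error handling elided for brevity */
	map = bpf_object__find_map_by_name(obj, "my_map");	/* hypothetical name */
	if (map)
		bpf_map__resize(map, 16384);	/* must run before bpf_object__load() */
	bpf_object__load(obj);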
static int
bpf_object__probe_name(struct bpf_object *obj)
{
@@ -1215,9 +1587,150 @@ bpf_object__probe_name(struct bpf_object *obj)
}
static int
+bpf_object__probe_global_data(struct bpf_object *obj)
+{
+ struct bpf_load_program_attr prg_attr;
+ struct bpf_create_map_attr map_attr;
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int ret, map;
+
+ memset(&map_attr, 0, sizeof(map_attr));
+ map_attr.map_type = BPF_MAP_TYPE_ARRAY;
+ map_attr.key_size = sizeof(int);
+ map_attr.value_size = 32;
+ map_attr.max_entries = 1;
+
+ map = bpf_create_map_xattr(&map_attr);
+ if (map < 0) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, errno);
+ return -errno;
+ }
+
+ insns[0].imm = map;
+
+ memset(&prg_attr, 0, sizeof(prg_attr));
+ prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ prg_attr.insns = insns;
+ prg_attr.insns_cnt = ARRAY_SIZE(insns);
+ prg_attr.license = "GPL";
+
+ ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
+ if (ret >= 0) {
+ obj->caps.global_data = 1;
+ close(ret);
+ }
+
+ close(map);
+ return 0;
+}
+
+static int bpf_object__probe_btf_func(struct bpf_object *obj)
+{
+ const char strs[] = "\0int\0x\0a";
+ /* void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
+ };
+ int btf_fd;
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+ if (btf_fd >= 0) {
+ obj->caps.btf_func = 1;
+ close(btf_fd);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
+{
+ const char strs[] = "\0x\0.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC val */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+ int btf_fd;
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+ if (btf_fd >= 0) {
+ obj->caps.btf_datasec = 1;
+ close(btf_fd);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
bpf_object__probe_caps(struct bpf_object *obj)
{
- return bpf_object__probe_name(obj);
+ int (*probe_fn[])(struct bpf_object *obj) = {
+ bpf_object__probe_name,
+ bpf_object__probe_global_data,
+ bpf_object__probe_btf_func,
+ bpf_object__probe_btf_datasec,
+ };
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
+ ret = probe_fn[i](obj);
+ if (ret < 0)
+ pr_debug("Probe #%d failed with %d.\n", i, ret);
+ }
+
+ return 0;
+}
+
+static int
+bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int err, zero = 0;
+ __u8 *data;
+
+ /* Nothing to do here since kernel already zero-initializes .bss map. */
+ if (map->libbpf_type == LIBBPF_MAP_BSS)
+ return 0;
+
+ data = map->libbpf_type == LIBBPF_MAP_DATA ?
+ obj->sections.data : obj->sections.rodata;
+
+ err = bpf_map_update_elem(map->fd, &zero, data, 0);
+ /* Freeze .rodata map as read-only from syscall side. */
+ if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
+ err = bpf_map_freeze(map->fd);
+ if (err) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("Error freezing map(%s) as read-only: %s\n",
+ map->name, cp);
+ err = 0;
+ }
+ }
+ return err;
}
static int
@@ -1277,6 +1790,7 @@ bpf_object__create_maps(struct bpf_object *obj)
size_t j;
err = *pfd;
+err_out:
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("failed to create map (name: '%s'): %s\n",
map->name, cp);
@@ -1284,6 +1798,15 @@ bpf_object__create_maps(struct bpf_object *obj)
zclose(obj->maps[j].fd);
return err;
}
+
+ if (bpf_map__is_internal(map)) {
+ err = bpf_object__populate_internal_map(obj, map);
+ if (err < 0) {
+ zclose(*pfd);
+ goto err_out;
+ }
+ }
+
pr_debug("create map %s: fd=%d\n", map->name, *pfd);
}
@@ -1438,21 +1961,29 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
- if (prog->reloc_desc[i].type == RELO_LD64) {
+ if (prog->reloc_desc[i].type == RELO_LD64 ||
+ prog->reloc_desc[i].type == RELO_DATA) {
+ bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
struct bpf_insn *insns = prog->insns;
int insn_idx, map_idx;
insn_idx = prog->reloc_desc[i].insn_idx;
map_idx = prog->reloc_desc[i].map_idx;
- if (insn_idx >= (int)prog->insns_cnt) {
+ if (insn_idx + 1 >= (int)prog->insns_cnt) {
pr_warning("relocation out of range: '%s'\n",
prog->section_name);
return -LIBBPF_ERRNO__RELOC;
}
- insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+
+ if (!relo_data) {
+ insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+ } else {
+ insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insns[insn_idx + 1].imm = insns[insn_idx].imm;
+ }
insns[insn_idx].imm = obj->maps[map_idx].fd;
- } else {
+ } else if (prog->reloc_desc[i].type == RELO_CALL) {
err = bpf_program__reloc_text(prog, obj,
&prog->reloc_desc[i]);
if (err)
@@ -1527,6 +2058,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
+ int log_buf_size = BPF_LOG_BUF_SIZE;
char *log_buf;
int ret;
@@ -1547,21 +2079,30 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.line_info = prog->line_info;
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
+ load_attr.log_level = prog->log_level;
if (!load_attr.insns || !load_attr.insns_cnt)
return -EINVAL;
- log_buf = malloc(BPF_LOG_BUF_SIZE);
+retry_load:
+ log_buf = malloc(log_buf_size);
if (!log_buf)
pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
- ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
if (ret >= 0) {
+ if (load_attr.log_level)
+ pr_debug("verifier log:\n%s", log_buf);
*pfd = ret;
ret = 0;
goto out;
}
+ if (errno == ENOSPC) {
+ log_buf_size <<= 1;
+ free(log_buf);
+ goto retry_load;
+ }
ret = -LIBBPF_ERRNO__LOAD;
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warning("load bpf program failed: %s\n", cp);
@@ -1637,7 +2178,7 @@ bpf_program__load(struct bpf_program *prog,
struct bpf_prog_prep_result result;
bpf_program_prep_t preprocessor = prog->preprocessor;
- bzero(&result, sizeof(result));
+ memset(&result, 0, sizeof(result));
err = preprocessor(prog, i, prog->insns,
prog->insns_cnt, &result);
if (err) {
@@ -1726,7 +2267,9 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
return false;
case BPF_PROG_TYPE_KPROBE:
default:
@@ -1762,6 +2305,7 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
CHECK_ERR(bpf_object__elf_init(obj), err, out);
CHECK_ERR(bpf_object__check_endianness(obj), err, out);
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
@@ -1855,7 +2399,6 @@ int bpf_object__load(struct bpf_object *obj)
obj->loaded = true;
- CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__create_maps(obj), err, out);
CHECK_ERR(bpf_object__relocate(obj), err, out);
CHECK_ERR(bpf_object__load_progs(obj), err, out);
@@ -2147,7 +2690,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
if (err)
return err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2194,7 +2737,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
if (!obj)
return -ENOENT;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
char buf[PATH_MAX];
int len;
@@ -2336,6 +2879,9 @@ void bpf_object__close(struct bpf_object *obj)
obj->maps[i].priv = NULL;
obj->maps[i].clear_priv = NULL;
}
+
+ zfree(&obj->sections.rodata);
+ zfree(&obj->sections.data);
zfree(&obj->maps);
obj->nr_maps = 0;
@@ -2378,6 +2924,11 @@ unsigned int bpf_object__kversion(struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+struct btf *bpf_object__btf(struct bpf_object *obj)
+{
+ return obj ? obj->btf : NULL;
+}
+
int bpf_object__btf_fd(const struct bpf_object *obj)
{
return obj->btf ? btf__fd(obj->btf) : -1;
@@ -2659,6 +3210,12 @@ static const struct {
BPF_CGROUP_UDP4_SENDMSG),
BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_CGROUP_UDP6_SENDMSG),
+ BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP4_RECVMSG),
+ BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+ BPF_CGROUP_UDP6_RECVMSG),
+ BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_CGROUP_SYSCTL),
};
#undef BPF_PROG_SEC_IMPL
@@ -2667,9 +3224,38 @@ static const struct {
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
+#define MAX_TYPE_NAME_SIZE 32
+
+static char *libbpf_get_type_names(bool attach_type)
+{
+ int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
+ char *buf;
+
+ buf = malloc(len);
+ if (!buf)
+ return NULL;
+
+ buf[0] = '\0';
+	/* Build a string with all available names */
+ for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+ if (attach_type && !section_names[i].is_attachable)
+ continue;
+
+ if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
+ free(buf);
+ return NULL;
+ }
+ strcat(buf, " ");
+ strcat(buf, section_names[i].sec);
+ }
+
+ return buf;
+}
+
int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2682,12 +3268,20 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
*expected_attach_type = section_names[i].expected_attach_type;
return 0;
}
+ pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(false);
+ if (type_names != NULL) {
+ pr_info("supported section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
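As a rough usage sketch, a caller can resolve an ELF section name to the corresponding program and attach types; the section name below is taken from the table added above:

	enum bpf_prog_type prog_type;
	enum bpf_attach_type attach_type;

	if (!libbpf_prog_type_by_name("cgroup/sysctl", &prog_type, &attach_type)) {
		/* prog_type is BPF_PROG_TYPE_CGROUP_SYSCTL and
		 * attach_type is BPF_CGROUP_SYSCTL for this section name
		 */
	}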
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
+ char *type_names;
int i;
if (!name)
@@ -2701,6 +3295,13 @@ int libbpf_attach_type_by_name(const char *name,
*attach_type = section_names[i].attach_type;
return 0;
}
+ pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(true);
+ if (type_names != NULL) {
+ pr_info("attachable section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
return -EINVAL;
}
@@ -2764,6 +3365,11 @@ bool bpf_map__is_offload_neutral(struct bpf_map *map)
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
+bool bpf_map__is_internal(struct bpf_map *map)
+{
+ return map->libbpf_type != LIBBPF_MAP_UNSPEC;
+}
+
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
map->map_ifindex = ifindex;
@@ -2833,13 +3439,19 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
struct bpf_map *pos;
- bpf_map__for_each(pos, obj) {
+ bpf_object__for_each_map(pos, obj) {
if (pos->name && !strcmp(pos->name, name))
return pos;
}
return NULL;
}
+int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
+{
+ return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
+}
+
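A brief sketch of how this convenience helper might be used on a loaded struct bpf_object *obj; the map name here is a hypothetical example:

	__u32 key = 0;
	__u64 value = 0;
	int fd = bpf_object__find_map_fd_by_name(obj, "counters");	/* hypothetical */

	if (fd >= 0)
		bpf_map_lookup_elem(fd, &key, &value);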
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
@@ -2907,8 +3519,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
err = bpf_program__identify_section(prog, &prog_type,
&expected_attach_type);
if (err < 0) {
- pr_warning("failed to guess program type based on section name %s\n",
- prog->section_name);
bpf_object__close(obj);
return -EINVAL;
}
@@ -2918,11 +3528,12 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
bpf_program__set_expected_attach_type(prog,
expected_attach_type);
+ prog->log_level = attr->log_level;
if (!first_prog)
first_prog = prog;
}
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_offload_neutral(map))
map->map_ifindex = attr->ifindex;
}
@@ -2991,3 +3602,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
ring_buffer_write_tail(header, data_tail);
return ret;
}
+
+struct bpf_prog_info_array_desc {
+ int array_offset; /* e.g. offset of jited_prog_insns */
+ int count_offset; /* e.g. offset of jited_prog_len */
+ int size_offset; /* > 0: offset of rec size,
+				 * < 0: fixed size of -size_offset
+ */
+};
+
+static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
+ [BPF_PROG_INFO_JITED_INSNS] = {
+ offsetof(struct bpf_prog_info, jited_prog_insns),
+ offsetof(struct bpf_prog_info, jited_prog_len),
+ -1,
+ },
+ [BPF_PROG_INFO_XLATED_INSNS] = {
+ offsetof(struct bpf_prog_info, xlated_prog_insns),
+ offsetof(struct bpf_prog_info, xlated_prog_len),
+ -1,
+ },
+ [BPF_PROG_INFO_MAP_IDS] = {
+ offsetof(struct bpf_prog_info, map_ids),
+ offsetof(struct bpf_prog_info, nr_map_ids),
+ -(int)sizeof(__u32),
+ },
+ [BPF_PROG_INFO_JITED_KSYMS] = {
+ offsetof(struct bpf_prog_info, jited_ksyms),
+ offsetof(struct bpf_prog_info, nr_jited_ksyms),
+ -(int)sizeof(__u64),
+ },
+ [BPF_PROG_INFO_JITED_FUNC_LENS] = {
+ offsetof(struct bpf_prog_info, jited_func_lens),
+ offsetof(struct bpf_prog_info, nr_jited_func_lens),
+ -(int)sizeof(__u32),
+ },
+ [BPF_PROG_INFO_FUNC_INFO] = {
+ offsetof(struct bpf_prog_info, func_info),
+ offsetof(struct bpf_prog_info, nr_func_info),
+ offsetof(struct bpf_prog_info, func_info_rec_size),
+ },
+ [BPF_PROG_INFO_LINE_INFO] = {
+ offsetof(struct bpf_prog_info, line_info),
+ offsetof(struct bpf_prog_info, nr_line_info),
+ offsetof(struct bpf_prog_info, line_info_rec_size),
+ },
+ [BPF_PROG_INFO_JITED_LINE_INFO] = {
+ offsetof(struct bpf_prog_info, jited_line_info),
+ offsetof(struct bpf_prog_info, nr_jited_line_info),
+ offsetof(struct bpf_prog_info, jited_line_info_rec_size),
+ },
+ [BPF_PROG_INFO_PROG_TAGS] = {
+ offsetof(struct bpf_prog_info, prog_tags),
+ offsetof(struct bpf_prog_info, nr_prog_tags),
+ -(int)sizeof(__u8) * BPF_TAG_SIZE,
+ },
+
+};
+
+static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
+{
+ __u32 *array = (__u32 *)info;
+
+ if (offset >= 0)
+ return array[offset / sizeof(__u32)];
+ return -(int)offset;
+}
+
+static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
+{
+ __u64 *array = (__u64 *)info;
+
+ if (offset >= 0)
+ return array[offset / sizeof(__u64)];
+ return -(int)offset;
+}
+
+static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
+ __u32 val)
+{
+ __u32 *array = (__u32 *)info;
+
+ if (offset >= 0)
+ array[offset / sizeof(__u32)] = val;
+}
+
+static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
+ __u64 val)
+{
+ __u64 *array = (__u64 *)info;
+
+ if (offset >= 0)
+ array[offset / sizeof(__u64)] = val;
+}
+
+struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 data_len = 0;
+ int i, err;
+ void *ptr;
+
+ if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
+ return ERR_PTR(-EINVAL);
+
+ /* step 1: get array dimensions */
+ err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
+ if (err) {
+ pr_debug("can't get prog info: %s", strerror(errno));
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* step 2: calculate total size of all arrays */
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ bool include_array = (arrays & (1UL << i)) > 0;
+ struct bpf_prog_info_array_desc *desc;
+ __u32 count, size;
+
+ desc = bpf_prog_info_array_desc + i;
+
+ /* kernel is too old to support this field */
+ if (info_len < desc->array_offset + sizeof(__u32) ||
+ info_len < desc->count_offset + sizeof(__u32) ||
+ (desc->size_offset > 0 && info_len < desc->size_offset))
+ include_array = false;
+
+ if (!include_array) {
+ arrays &= ~(1UL << i); /* clear the bit */
+ continue;
+ }
+
+ count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+
+ data_len += count * size;
+ }
+
+	/* step 3: allocate contiguous memory */
+ data_len = roundup(data_len, sizeof(__u64));
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
+ if (!info_linear)
+ return ERR_PTR(-ENOMEM);
+
+ /* step 4: fill data to info_linear->info */
+ info_linear->arrays = arrays;
+ memset(&info_linear->info, 0, sizeof(info));
+ ptr = info_linear->data;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u32 count, size;
+
+ if ((arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ bpf_prog_info_set_offset_u32(&info_linear->info,
+ desc->count_offset, count);
+ bpf_prog_info_set_offset_u32(&info_linear->info,
+ desc->size_offset, size);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset,
+ ptr_to_u64(ptr));
+ ptr += count * size;
+ }
+
+ /* step 5: call syscall again to get required arrays */
+ err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
+ if (err) {
+ pr_debug("can't get prog info: %s", strerror(errno));
+ free(info_linear);
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* step 6: verify the data */
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u32 v1, v2;
+
+ if ((arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ desc->count_offset);
+ if (v1 != v2)
+ pr_warning("%s: mismatch in element count\n", __func__);
+
+ v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ desc->size_offset);
+ if (v1 != v2)
+ pr_warning("%s: mismatch in rec size\n", __func__);
+ }
+
+ /* step 7: update info_len and data_len */
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+
+ return info_linear;
+}
+
+void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
+{
+ int i;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u64 addr, offs;
+
+ if ((info_linear->arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ addr = bpf_prog_info_read_offset_u64(&info_linear->info,
+ desc->array_offset);
+ offs = addr - ptr_to_u64(info_linear->data);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset, offs);
+ }
+}
+
+void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
+{
+ int i;
+
+ for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
+ struct bpf_prog_info_array_desc *desc;
+ __u64 addr, offs;
+
+ if ((info_linear->arrays & (1UL << i)) == 0)
+ continue;
+
+ desc = bpf_prog_info_array_desc + i;
+ offs = bpf_prog_info_read_offset_u64(&info_linear->info,
+ desc->array_offset);
+ addr = offs + ptr_to_u64(info_linear->data);
+ bpf_prog_info_set_offset_u64(&info_linear->info,
+ desc->array_offset, addr);
+ }
+}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5f68d7b75215..c5ff00515ce7 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -10,6 +10,7 @@
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H
+#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
@@ -47,17 +48,16 @@ enum libbpf_errno {
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
-/*
- * __printf is defined in include/linux/compiler-gcc.h. However,
- * it would be better if libbpf.h didn't depend on Linux header files.
- * So instead of __printf, here we use gcc attribute directly.
- */
-typedef int (*libbpf_print_fn_t)(const char *, ...)
- __attribute__((format(printf, 1, 2)));
+enum libbpf_print_level {
+ LIBBPF_WARN,
+ LIBBPF_INFO,
+ LIBBPF_DEBUG,
+};
+
+typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
+ const char *, va_list ap);
-LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn,
- libbpf_print_fn_t info,
- libbpf_print_fn_t debug);
+LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
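A sketch of a callback matching the new single-function signature; whether and how to filter on level is up to the caller:

	static int my_print(enum libbpf_print_level level,
			    const char *fmt, va_list args)
	{
		if (level == LIBBPF_DEBUG)
			return 0;	/* drop debug messages */
		return vfprintf(stderr, fmt, args);
	}

	/* during initialization */
	libbpf_set_print(my_print);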
/* Hide internal to user */
struct bpf_object;
@@ -75,6 +75,10 @@ struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name);
+int bpf_object__section_size(const struct bpf_object *obj, const char *name,
+ __u32 *size);
+int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
+ __u32 *off);
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
const char *path);
@@ -90,6 +94,9 @@ LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
+
+struct btf;
+LIBBPF_API struct btf *bpf_object__btf(struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
@@ -264,6 +271,9 @@ struct bpf_map;
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+LIBBPF_API int
+bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name);
+
/*
* Get bpf_map through the offset of corresponding struct bpf_map_def
* in the BPF object file.
@@ -273,10 +283,11 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
LIBBPF_API struct bpf_map *
bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
-#define bpf_map__for_each(pos, obj) \
+#define bpf_object__for_each_map(pos, obj) \
for ((pos) = bpf_map__next(NULL, (obj)); \
(pos) != NULL; \
(pos) = bpf_map__next((pos), (obj)))
+#define bpf_map__for_each bpf_object__for_each_map
LIBBPF_API struct bpf_map *
bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
@@ -292,7 +303,9 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
+LIBBPF_API bool bpf_map__is_internal(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
@@ -306,6 +319,7 @@ struct bpf_prog_load_attr {
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
int ifindex;
+ int log_level;
};
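With the new log_level field, a caller of bpf_prog_load_xattr() can request the verifier log up front; a minimal sketch, assuming log_level 1 selects the kernel's basic verifier output (printed via pr_debug() as in the loader change above):

	struct bpf_prog_load_attr attr = {
		.file = "prog.o",		/* hypothetical object file */
		.prog_type = BPF_PROG_TYPE_XDP,
		.log_level = 1,			/* basic verifier log */
	};
	struct bpf_object *obj;
	int prog_fd;

	bpf_prog_load_xattr(&attr, &obj, &prog_fd);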
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
@@ -314,6 +328,7 @@ LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
@@ -355,6 +370,83 @@ LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
__u32 insn_off, __u32 nr_skip);
+/*
+ * Probe for supported system features
+ *
+ * Note that running many of these probes in a short amount of time can cause
+ * the kernel to reach the maximal size of lockable memory allowed for the
+ * user, causing subsequent probes to fail. In this case, the caller may want
+ * to adjust that limit with setrlimit().
+ */
+LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
+ __u32 ifindex);
+LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
+LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
+ enum bpf_prog_type prog_type, __u32 ifindex);
+
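Because repeated probing can run into the per-user locked-memory limit mentioned above, a caller may want to raise RLIMIT_MEMLOCK first; a minimal sketch (unlimited is one possible choice):

	#include <sys/resource.h>

	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	setrlimit(RLIMIT_MEMLOCK, &r);
	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
		/* this kernel accepts XDP programs */;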
+/*
+ * Get bpf_prog_info in contiguous memory
+ *
+ * struct bpf_prog_info has multiple arrays. The user has the option to
+ * choose which arrays to fetch from the kernel. The following APIs provide
+ * a uniform way to fetch this data. All arrays in bpf_prog_info are stored
+ * in a single contiguous memory region, which makes it easy to store the
+ * info in a file.
+ *
+ * Before writing bpf_prog_info_linear to files, it is necessary to
+ * translate pointers in bpf_prog_info to offsets. Helper functions
+ * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
+ * are introduced to switch between pointers and offsets.
+ *
+ * Examples:
+ * # To fetch map_ids and prog_tags:
+ * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
+ * (1UL << BPF_PROG_INFO_PROG_TAGS);
+ * struct bpf_prog_info_linear *info_linear =
+ * bpf_program__get_prog_info_linear(fd, arrays);
+ *
+ * # To save data in file
+ * bpf_program__bpil_addr_to_offs(info_linear);
+ * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
+ *
+ * # To read data from file
+ * read(f, info_linear, <proper_size>);
+ * bpf_program__bpil_offs_to_addr(info_linear);
+ */
+enum bpf_prog_info_array {
+ BPF_PROG_INFO_FIRST_ARRAY = 0,
+ BPF_PROG_INFO_JITED_INSNS = 0,
+ BPF_PROG_INFO_XLATED_INSNS,
+ BPF_PROG_INFO_MAP_IDS,
+ BPF_PROG_INFO_JITED_KSYMS,
+ BPF_PROG_INFO_JITED_FUNC_LENS,
+ BPF_PROG_INFO_FUNC_INFO,
+ BPF_PROG_INFO_LINE_INFO,
+ BPF_PROG_INFO_JITED_LINE_INFO,
+ BPF_PROG_INFO_PROG_TAGS,
+ BPF_PROG_INFO_LAST_ARRAY,
+};
+
+struct bpf_prog_info_linear {
+ /* size of struct bpf_prog_info, when the tool is compiled */
+ __u32 info_len;
+	/* total bytes allocated for data, rounded up to 8 bytes */
+ __u32 data_len;
+ /* which arrays are included in data */
+ __u64 arrays;
+ struct bpf_prog_info info;
+ __u8 data[];
+};
+
+LIBBPF_API struct bpf_prog_info_linear *
+bpf_program__get_prog_info_linear(int fd, __u64 arrays);
+
+LIBBPF_API void
+bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
+
+LIBBPF_API void
+bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index cd02cd4e2cc3..673001787cba 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -124,3 +124,43 @@ LIBBPF_0.0.1 {
local:
*;
};
+
+LIBBPF_0.0.2 {
+ global:
+ bpf_probe_helper;
+ bpf_probe_map_type;
+ bpf_probe_prog_type;
+ bpf_map__resize;
+ bpf_map_lookup_elem_flags;
+ bpf_object__btf;
+ bpf_object__find_map_fd_by_name;
+ bpf_get_link_xdp_id;
+ btf__dedup;
+ btf__get_map_kv_tids;
+ btf__get_nr_types;
+ btf__get_raw_data;
+ btf__load;
+ btf_ext__free;
+ btf_ext__func_info_rec_size;
+ btf_ext__get_raw_data;
+ btf_ext__line_info_rec_size;
+ btf_ext__new;
+ btf_ext__reloc_func_info;
+ btf_ext__reloc_line_info;
+ xsk_umem__create;
+ xsk_socket__create;
+ xsk_umem__delete;
+ xsk_socket__delete;
+ xsk_umem__fd;
+ xsk_socket__fd;
+ bpf_program__get_prog_info_linear;
+ bpf_program__bpil_addr_to_offs;
+ bpf_program__bpil_offs_to_addr;
+} LIBBPF_0.0.1;
+
+LIBBPF_0.0.3 {
+ global:
+ bpf_map__is_internal;
+ bpf_map_freeze;
+ btf__finalize_data;
+} LIBBPF_0.0.2;
diff --git a/tools/lib/bpf/libbpf.pc.template b/tools/lib/bpf/libbpf.pc.template
new file mode 100644
index 000000000000..ac17fcef2108
--- /dev/null
+++ b/tools/lib/bpf/libbpf.pc.template
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libbpf
+Description: BPF library
+Version: @VERSION@
+Libs: -L${libdir} -lbpf
+Requires.private: libelf
+Cflags: -I${includedir}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
new file mode 100644
index 000000000000..dfab8012185c
--- /dev/null
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Internal libbpf helpers.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+
+#ifndef __LIBBPF_LIBBPF_INTERNAL_H
+#define __LIBBPF_LIBBPF_INTERNAL_H
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
+#define BTF_PARAM_ENC(name, type) (name), (type)
+#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
+
+extern void libbpf_print(enum libbpf_print_level level,
+ const char *format, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#define __pr(level, fmt, ...) \
+do { \
+ libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len);
+
+#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
new file mode 100644
index 000000000000..6635a31a7a16
--- /dev/null
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/utsname.h>
+
+#include <linux/btf.h>
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
+static int get_kernel_version(void)
+{
+ int version, subversion, patchlevel;
+ struct utsname utsn;
+
+ /* Return 0 on failure, and attempt to probe with empty kversion */
+ if (uname(&utsn))
+ return 0;
+
+ if (sscanf(utsn.release, "%d.%d.%d",
+ &version, &subversion, &patchlevel) != 3)
+ return 0;
+
+ return (version << 16) + (subversion << 8) + patchlevel;
+}
+
+static void
+probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
+ size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+{
+ struct bpf_load_program_attr xattr = {};
+ int fd;
+
+ switch (prog_type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ break;
+ case BPF_PROG_TYPE_KPROBE:
+ xattr.kern_version = get_kernel_version();
+ break;
+ case BPF_PROG_TYPE_UNSPEC:
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_LWT_IN:
+ case BPF_PROG_TYPE_LWT_OUT:
+ case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+ case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ default:
+ break;
+ }
+
+ xattr.prog_type = prog_type;
+ xattr.insns = insns;
+ xattr.insns_cnt = insns_cnt;
+ xattr.license = "GPL";
+ xattr.prog_ifindex = ifindex;
+
+ fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+ if (fd >= 0)
+ close(fd);
+}
+
+bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN()
+ };
+
+ if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ insns[0].imm = 2;
+
+ errno = 0;
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
+
+ return errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
+ const char *str_sec, size_t str_len)
+{
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = types_len,
+ .str_off = types_len,
+ .str_len = str_len,
+ };
+ int btf_fd, btf_len;
+ __u8 *raw_btf;
+
+ btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
+ raw_btf = malloc(btf_len);
+ if (!raw_btf)
+ return -ENOMEM;
+
+ memcpy(raw_btf, &hdr, sizeof(hdr));
+ memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
+ memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
+
+ btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
+
+ free(raw_btf);
+ return btf_fd;
+}
+
+static int load_sk_storage_btf(void)
+{
+ const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
+ /* struct bpf_spin_lock {
+ * int val;
+ * };
+ * struct val {
+ * int cnt;
+ * struct bpf_spin_lock l;
+ * };
+ */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+ };
+
+ return libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs));
+}
+
+bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+{
+ int key_size, value_size, max_entries, map_flags;
+ __u32 btf_key_type_id = 0, btf_value_type_id = 0;
+ struct bpf_create_map_attr attr = {};
+ int fd = -1, btf_fd = -1, fd_inner;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+ map_flags = 0;
+
+ switch (map_type) {
+ case BPF_MAP_TYPE_STACK_TRACE:
+ value_size = sizeof(__u64);
+ break;
+ case BPF_MAP_TYPE_LPM_TRIE:
+ key_size = sizeof(__u64);
+ value_size = sizeof(__u64);
+ map_flags = BPF_F_NO_PREALLOC;
+ break;
+ case BPF_MAP_TYPE_CGROUP_STORAGE:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+ key_size = sizeof(struct bpf_cgroup_storage_key);
+ value_size = sizeof(__u64);
+ max_entries = 0;
+ break;
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
+ key_size = 0;
+ break;
+ case BPF_MAP_TYPE_SK_STORAGE:
+ btf_key_type_id = 1;
+ btf_value_type_id = 3;
+ value_size = 8;
+ max_entries = 0;
+ map_flags = BPF_F_NO_PREALLOC;
+ btf_fd = load_sk_storage_btf();
+ if (btf_fd < 0)
+ return false;
+ break;
+ case BPF_MAP_TYPE_UNSPEC:
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+ default:
+ break;
+ }
+
+ if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ /* TODO: probe for device, once libbpf has a function to create
+ * map-in-map for offload
+ */
+ if (ifindex)
+ return false;
+
+ fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(__u32), sizeof(__u32), 1, 0);
+ if (fd_inner < 0)
+ return false;
+ fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
+ fd_inner, 1, 0);
+ close(fd_inner);
+ } else {
+ /* Note: No other restriction on map type probes for offload */
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+ attr.map_flags = map_flags;
+ attr.map_ifindex = ifindex;
+ if (btf_fd >= 0) {
+ attr.btf_fd = btf_fd;
+ attr.btf_key_type_id = btf_key_type_id;
+ attr.btf_value_type_id = btf_value_type_id;
+ }
+
+ fd = bpf_create_map_xattr(&attr);
+ }
+ if (fd >= 0)
+ close(fd);
+ if (btf_fd >= 0)
+ close(btf_fd);
+
+ return fd >= 0;
+}
+
+bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
+ ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ if (ifindex) {
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return res;
+}
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
new file mode 100644
index 000000000000..59c779c5790c
--- /dev/null
+++ b/tools/lib/bpf/libbpf_util.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef __LIBBPF_LIBBPF_UTIL_H
+#define __LIBBPF_LIBBPF_UTIL_H
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Use these barrier functions instead of smp_[rw]mb() when they are
+ * used in a libbpf header file. That way they can be built into the
+ * application that uses libbpf.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+# define libbpf_smp_rmb() asm volatile("" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("" : : : "memory")
+# define libbpf_smp_mb() \
+ asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
+/* Prevents stores from being observed before older loads. */
+# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
+#elif defined(__aarch64__)
+# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
+#elif defined(__arm__)
+/* These are only valid for armv7 and above */
+# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
+#else
+/* Architecture missing native barrier functions. */
+# define libbpf_smp_rmb() __sync_synchronize()
+# define libbpf_smp_wmb() __sync_synchronize()
+# define libbpf_smp_mb() __sync_synchronize()
+# define libbpf_smp_rwmb() __sync_synchronize()
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 0ce67aea8f3b..ce3ec81b71c0 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -21,6 +21,12 @@
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
+struct xdp_id_md {
+ int ifindex;
+ __u32 flags;
+ __u32 id;
+};
+
int libbpf_netlink_open(__u32 *nl_pid)
{
struct sockaddr_nl sa;
@@ -196,6 +202,85 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
return dump_link_nlmsg(cookie, ifi, tb);
}
+static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags)
+{
+ if (mode != XDP_ATTACHED_MULTI)
+ return IFLA_XDP_PROG_ID;
+ if (flags & XDP_FLAGS_DRV_MODE)
+ return IFLA_XDP_DRV_PROG_ID;
+ if (flags & XDP_FLAGS_HW_MODE)
+ return IFLA_XDP_HW_PROG_ID;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return IFLA_XDP_SKB_PROG_ID;
+
+ return IFLA_XDP_UNSPEC;
+}
+
+static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
+ struct xdp_id_md *xdp_id = cookie;
+ struct ifinfomsg *ifinfo = msg;
+ unsigned char mode, xdp_attr;
+ int ret;
+
+ if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
+ return 0;
+
+ if (!tb[IFLA_XDP])
+ return 0;
+
+ ret = libbpf_nla_parse_nested(xdp_tb, IFLA_XDP_MAX, tb[IFLA_XDP], NULL);
+ if (ret)
+ return ret;
+
+ if (!xdp_tb[IFLA_XDP_ATTACHED])
+ return 0;
+
+ mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]);
+ if (mode == XDP_ATTACHED_NONE)
+ return 0;
+
+ xdp_attr = get_xdp_id_attr(mode, xdp_id->flags);
+ if (!xdp_attr || !xdp_tb[xdp_attr])
+ return 0;
+
+ xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]);
+
+ return 0;
+}
+
+int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+{
+ struct xdp_id_md xdp_id = {};
+ int sock, ret;
+ __u32 nl_pid;
+ __u32 mask;
+
+ if (flags & ~XDP_FLAGS_MASK)
+ return -EINVAL;
+
+	/* Check that at most one of the {HW,DRV,SKB} modes is set */
+ flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE);
+ mask = flags - 1;
+ if (flags && flags & mask)
+ return -EINVAL;
+
+ sock = libbpf_netlink_open(&nl_pid);
+ if (sock < 0)
+ return sock;
+
+ xdp_id.ifindex = ifindex;
+ xdp_id.flags = flags;
+
+ ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id);
+ if (!ret)
+ *prog_id = xdp_id.id;
+
+ close(sock);
+ return ret;
+}
+
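A short usage sketch for the new getter; the interface name is illustrative:

	#include <net/if.h>

	__u32 prog_id = 0;
	int ifindex = if_nametoindex("eth0");	/* hypothetical interface */

	if (!bpf_get_link_xdp_id(ifindex, &prog_id, 0) && prog_id)
		/* an XDP program with this id is attached */;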
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp
index abf3fc25c9fa..fc134873bb6d 100644
--- a/tools/lib/bpf/test_libbpf.cpp
+++ b/tools/lib/bpf/test_libbpf.cpp
@@ -8,11 +8,11 @@
int main(int argc, char *argv[])
{
/* libbpf.h */
- libbpf_set_print(NULL, NULL, NULL);
+ libbpf_set_print(NULL);
/* bpf.h */
bpf_prog_get_fd_by_id(0);
/* btf.h */
- btf__new(NULL, 0, NULL);
+ btf__new(NULL, 0);
}
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
new file mode 100644
index 000000000000..38667b62f1fe
--- /dev/null
+++ b/tools/lib/bpf/xsk.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <asm/barrier.h>
+#include <linux/compiler.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_xdp.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "xsk.h"
+
+#ifndef SOL_XDP
+ #define SOL_XDP 283
+#endif
+
+#ifndef AF_XDP
+ #define AF_XDP 44
+#endif
+
+#ifndef PF_XDP
+ #define PF_XDP AF_XDP
+#endif
+
+struct xsk_umem {
+ struct xsk_ring_prod *fill;
+ struct xsk_ring_cons *comp;
+ char *umem_area;
+ struct xsk_umem_config config;
+ int fd;
+ int refcount;
+};
+
+struct xsk_socket {
+ struct xsk_ring_cons *rx;
+ struct xsk_ring_prod *tx;
+ __u64 outstanding_tx;
+ struct xsk_umem *umem;
+ struct xsk_socket_config config;
+ int fd;
+ int xsks_map;
+ int ifindex;
+ int prog_fd;
+ int qidconf_map_fd;
+ int xsks_map_fd;
+ __u32 queue_id;
+ char ifname[IFNAMSIZ];
+};
+
+struct xsk_nl_info {
+ bool xdp_prog_attached;
+ int ifindex;
+ int fd;
+};
+
+/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
+ * Unfortunately, it is not part of glibc.
+ */
+static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, __u64 offset)
+{
+#ifdef __NR_mmap2
+ unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
+ long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
+ (off_t)(offset >> page_shift));
+
+ return (void *)ret;
+#else
+ return mmap(addr, length, prot, flags, fd, offset);
+#endif
+}
+
+int xsk_umem__fd(const struct xsk_umem *umem)
+{
+ return umem ? umem->fd : -EINVAL;
+}
+
+int xsk_socket__fd(const struct xsk_socket *xsk)
+{
+ return xsk ? xsk->fd : -EINVAL;
+}
+
+static bool xsk_page_aligned(void *buffer)
+{
+ unsigned long addr = (unsigned long)buffer;
+
+ return !(addr & (getpagesize() - 1));
+}
+
+static void xsk_set_umem_config(struct xsk_umem_config *cfg,
+ const struct xsk_umem_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ return;
+ }
+
+ cfg->fill_size = usr_cfg->fill_size;
+ cfg->comp_size = usr_cfg->comp_size;
+ cfg->frame_size = usr_cfg->frame_size;
+ cfg->frame_headroom = usr_cfg->frame_headroom;
+}
+
+static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+ const struct xsk_socket_config *usr_cfg)
+{
+ if (!usr_cfg) {
+ cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
+ cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
+ cfg->libbpf_flags = 0;
+ cfg->xdp_flags = 0;
+ cfg->bind_flags = 0;
+ return 0;
+ }
+
+ if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
+ return -EINVAL;
+
+ cfg->rx_size = usr_cfg->rx_size;
+ cfg->tx_size = usr_cfg->tx_size;
+ cfg->libbpf_flags = usr_cfg->libbpf_flags;
+ cfg->xdp_flags = usr_cfg->xdp_flags;
+ cfg->bind_flags = usr_cfg->bind_flags;
+
+ return 0;
+}
+
+int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
+{
+ struct xdp_mmap_offsets off;
+ struct xdp_umem_reg mr;
+ struct xsk_umem *umem;
+ socklen_t optlen;
+ void *map;
+ int err;
+
+ if (!umem_area || !umem_ptr || !fill || !comp)
+ return -EFAULT;
+ if (!size && !xsk_page_aligned(umem_area))
+ return -EINVAL;
+
+ umem = calloc(1, sizeof(*umem));
+ if (!umem)
+ return -ENOMEM;
+
+ umem->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (umem->fd < 0) {
+ err = -errno;
+ goto out_umem_alloc;
+ }
+
+ umem->umem_area = umem_area;
+ xsk_set_umem_config(&umem->config, usr_config);
+
+ mr.addr = (uintptr_t)umem_area;
+ mr.len = size;
+ mr.chunk_size = umem->config.frame_size;
+ mr.headroom = umem->config.frame_headroom;
+
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
+ &umem->config.fill_size,
+ sizeof(umem->config.fill_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
+ &umem->config.comp_size,
+ sizeof(umem->config.comp_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ map = xsk_mmap(NULL, off.fr.desc +
+ umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_FILL_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ umem->fill = fill;
+ fill->mask = umem->config.fill_size - 1;
+ fill->size = umem->config.fill_size;
+ fill->producer = map + off.fr.producer;
+ fill->consumer = map + off.fr.consumer;
+ fill->ring = map + off.fr.desc;
+ fill->cached_cons = umem->config.fill_size;
+
+ map = xsk_mmap(NULL,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap;
+ }
+
+ umem->comp = comp;
+ comp->mask = umem->config.comp_size - 1;
+ comp->size = umem->config.comp_size;
+ comp->producer = map + off.cr.producer;
+ comp->consumer = map + off.cr.consumer;
+ comp->ring = map + off.cr.desc;
+
+ *umem_ptr = umem;
+ return 0;
+
+out_mmap:
+ munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
+out_socket:
+ close(umem->fd);
+out_umem_alloc:
+ free(umem);
+ return err;
+}
+
+static int xsk_load_xdp_prog(struct xsk_socket *xsk)
+{
+ static const int log_buf_size = 16 * 1024;
+ char log_buf[log_buf_size];
+ int err, prog_fd;
+
+ /* This is the C-program:
+ * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
+ * {
+ * int *qidconf, index = ctx->rx_queue_index;
+ *
+ * // A set entry here means that the corresponding queue_id
+ * // has an active AF_XDP socket bound to it.
+ * qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
+ * if (!qidconf)
+ * return XDP_ABORTED;
+ *
+ * if (*qidconf)
+ * return bpf_redirect_map(&xsks_map, index, 0);
+ *
+ * return XDP_PASS;
+ * }
+ */
+ struct bpf_insn prog[] = {
+ /* r1 = *(u32 *)(r1 + 16) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
+ /* *(u32 *)(r10 - 4) = r1 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ /* if r1 == 0 goto +8 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ /* r1 = *(u32 *)(r1 + 0) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
+ /* if r1 == 0 goto +5 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
+ /* r2 = *(u32 *)(r10 - 4) */
+ BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
+ BPF_MOV32_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_redirect_map),
+ /* The jumps are to this instruction */
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
+
+ prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
+ "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
+ log_buf_size);
+ if (prog_fd < 0) {
+ pr_warning("BPF log buffer:\n%s", log_buf);
+ return prog_fd;
+ }
+
+ err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
+ if (err) {
+ close(prog_fd);
+ return err;
+ }
+
+ xsk->prog_fd = prog_fd;
+ return 0;
+}
+
+static int xsk_get_max_queues(struct xsk_socket *xsk)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int fd, err, ret;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return -errno;
+
+ channels.cmd = ETHTOOL_GCHANNELS;
+ ifr.ifr_data = (void *)&channels;
+ strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (err && errno != EOPNOTSUPP) {
+ ret = -errno;
+ goto out;
+ }
+
+ if (channels.max_combined == 0 || errno == EOPNOTSUPP)
+ /* If the device says it has no channels, then all traffic
+ * is sent to a single stream, so max queues = 1.
+ */
+ ret = 1;
+ else
+ ret = channels.max_combined;
+
+out:
+ close(fd);
+ return ret;
+}
+
+static int xsk_create_bpf_maps(struct xsk_socket *xsk)
+{
+ int max_queues;
+ int fd;
+
+ max_queues = xsk_get_max_queues(xsk);
+ if (max_queues < 0)
+ return max_queues;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0)
+ return fd;
+ xsk->qidconf_map_fd = fd;
+
+ fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
+ sizeof(int), sizeof(int), max_queues, 0);
+ if (fd < 0) {
+ close(xsk->qidconf_map_fd);
+ return fd;
+ }
+ xsk->xsks_map_fd = fd;
+
+ return 0;
+}
+
+static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
+{
+ close(xsk->qidconf_map_fd);
+ close(xsk->xsks_map_fd);
+ xsk->qidconf_map_fd = -1;
+ xsk->xsks_map_fd = -1;
+}
+
+static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
+{
+ __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
+ __u32 map_len = sizeof(struct bpf_map_info);
+ struct bpf_prog_info prog_info = {};
+ struct bpf_map_info map_info;
+ int fd, err;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ return err;
+
+ num_maps = prog_info.nr_map_ids;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
+ if (!map_ids)
+ return -ENOMEM;
+
+ memset(&prog_info, 0, prog_len);
+ prog_info.nr_map_ids = num_maps;
+ prog_info.map_ids = (__u64)(unsigned long)map_ids;
+
+ err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ if (err)
+ goto out_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ if (xsk->qidconf_map_fd != -1 && xsk->xsks_map_fd != -1)
+ break;
+
+ fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (fd < 0)
+ continue;
+
+ err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
+ if (err) {
+ close(fd);
+ continue;
+ }
+
+ if (!strcmp(map_info.name, "qidconf_map")) {
+ xsk->qidconf_map_fd = fd;
+ continue;
+ }
+
+ if (!strcmp(map_info.name, "xsks_map")) {
+ xsk->xsks_map_fd = fd;
+ continue;
+ }
+
+ close(fd);
+ }
+
+ err = 0;
+ if (xsk->qidconf_map_fd < 0 || xsk->xsks_map_fd < 0) {
+ err = -ENOENT;
+ xsk_delete_bpf_maps(xsk);
+ }
+
+out_map_ids:
+ free(map_ids);
+ return err;
+}
+
+static void xsk_clear_bpf_maps(struct xsk_socket *xsk)
+{
+ int qid = false;
+
+ bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
+ bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
+}
+
+static int xsk_set_bpf_maps(struct xsk_socket *xsk)
+{
+ int qid = true, fd = xsk->fd, err;
+
+ err = bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id, &qid, 0);
+ if (err)
+ goto out;
+
+ err = bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id, &fd, 0);
+ if (err)
+ goto out;
+
+ return 0;
+out:
+ xsk_clear_bpf_maps(xsk);
+ return err;
+}
+
+static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
+{
+ __u32 prog_id = 0;
+ int err;
+
+ err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
+ xsk->config.xdp_flags);
+ if (err)
+ return err;
+
+ if (!prog_id) {
+ err = xsk_create_bpf_maps(xsk);
+ if (err)
+ return err;
+
+ err = xsk_load_xdp_prog(xsk);
+ if (err)
+ goto out_maps;
+ } else {
+ xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ err = xsk_lookup_bpf_maps(xsk);
+ if (err)
+ goto out_load;
+ }
+
+ err = xsk_set_bpf_maps(xsk);
+ if (err)
+ goto out_load;
+
+ return 0;
+
+out_load:
+ close(xsk->prog_fd);
+out_maps:
+ xsk_delete_bpf_maps(xsk);
+ return err;
+}
+
+int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *usr_config)
+{
+ void *rx_map = NULL, *tx_map = NULL;
+ struct sockaddr_xdp sxdp = {};
+ struct xdp_mmap_offsets off;
+ struct xsk_socket *xsk;
+ socklen_t optlen;
+ int err;
+
+ if (!umem || !xsk_ptr || !rx || !tx)
+ return -EFAULT;
+
+ if (umem->refcount) {
+ pr_warning("Error: shared umems not supported by libbpf.\n");
+ return -EBUSY;
+ }
+
+ xsk = calloc(1, sizeof(*xsk));
+ if (!xsk)
+ return -ENOMEM;
+
+ if (umem->refcount++ > 0) {
+ xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
+ if (xsk->fd < 0) {
+ err = -errno;
+ goto out_xsk_alloc;
+ }
+ } else {
+ xsk->fd = umem->fd;
+ }
+
+ xsk->outstanding_tx = 0;
+ xsk->queue_id = queue_id;
+ xsk->umem = umem;
+ xsk->ifindex = if_nametoindex(ifname);
+ if (!xsk->ifindex) {
+ err = -errno;
+ goto out_socket;
+ }
+ strncpy(xsk->ifname, ifname, IFNAMSIZ);
+
+ err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+ if (err)
+ goto out_socket;
+
+ if (rx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
+ &xsk->config.rx_size,
+ sizeof(xsk->config.rx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+ if (tx) {
+ err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
+ &xsk->config.tx_size,
+ sizeof(xsk->config.tx_size));
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+ }
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (err) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ if (rx) {
+ rx_map = xsk_mmap(NULL, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_RX_RING);
+ if (rx_map == MAP_FAILED) {
+ err = -errno;
+ goto out_socket;
+ }
+
+ rx->mask = xsk->config.rx_size - 1;
+ rx->size = xsk->config.rx_size;
+ rx->producer = rx_map + off.rx.producer;
+ rx->consumer = rx_map + off.rx.consumer;
+ rx->ring = rx_map + off.rx.desc;
+ }
+ xsk->rx = rx;
+
+ if (tx) {
+ tx_map = xsk_mmap(NULL, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_TX_RING);
+ if (tx_map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap_rx;
+ }
+
+ tx->mask = xsk->config.tx_size - 1;
+ tx->size = xsk->config.tx_size;
+ tx->producer = tx_map + off.tx.producer;
+ tx->consumer = tx_map + off.tx.consumer;
+ tx->ring = tx_map + off.tx.desc;
+ tx->cached_cons = xsk->config.tx_size;
+ }
+ xsk->tx = tx;
+
+ sxdp.sxdp_family = PF_XDP;
+ sxdp.sxdp_ifindex = xsk->ifindex;
+ sxdp.sxdp_queue_id = xsk->queue_id;
+ sxdp.sxdp_flags = xsk->config.bind_flags;
+
+ err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
+ if (err) {
+ err = -errno;
+ goto out_mmap_tx;
+ }
+
+ xsk->qidconf_map_fd = -1;
+ xsk->xsks_map_fd = -1;
+
+ if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
+ err = xsk_setup_xdp_prog(xsk);
+ if (err)
+ goto out_mmap_tx;
+ }
+
+ *xsk_ptr = xsk;
+ return 0;
+
+out_mmap_tx:
+ if (tx)
+ munmap(tx_map, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc));
+out_mmap_rx:
+ if (rx)
+ munmap(rx_map, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc));
+out_socket:
+ if (--umem->refcount)
+ close(xsk->fd);
+out_xsk_alloc:
+ free(xsk);
+ return err;
+}
+
+int xsk_umem__delete(struct xsk_umem *umem)
+{
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!umem)
+ return 0;
+
+ if (umem->refcount)
+ return -EBUSY;
+
+ optlen = sizeof(off);
+ err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ munmap(umem->fill->ring - off.fr.desc,
+ off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ munmap(umem->comp->ring - off.cr.desc,
+ off.cr.desc + umem->config.comp_size * sizeof(__u64));
+ }
+
+ close(umem->fd);
+ free(umem);
+
+ return 0;
+}
+
+void xsk_socket__delete(struct xsk_socket *xsk)
+{
+ size_t desc_sz = sizeof(struct xdp_desc);
+ struct xdp_mmap_offsets off;
+ socklen_t optlen;
+ int err;
+
+ if (!xsk)
+ return;
+
+ xsk_clear_bpf_maps(xsk);
+ xsk_delete_bpf_maps(xsk);
+
+ optlen = sizeof(off);
+ err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
+ if (!err) {
+ if (xsk->rx) {
+ munmap(xsk->rx->ring - off.rx.desc,
+ off.rx.desc + xsk->config.rx_size * desc_sz);
+ }
+ if (xsk->tx) {
+ munmap(xsk->tx->ring - off.tx.desc,
+ off.tx.desc + xsk->config.tx_size * desc_sz);
+ }
+
+ }
+
+ xsk->umem->refcount--;
+ /* Do not close an fd that also has an associated umem connected
+ * to it.
+ */
+ if (xsk->fd != xsk->umem->fd)
+ close(xsk->fd);
+ free(xsk);
+}
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
new file mode 100644
index 000000000000..82ea71a0f3ec
--- /dev/null
+++ b/tools/lib/bpf/xsk.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * AF_XDP user-space access library.
+ *
+ * Copyright(c) 2018 - 2019 Intel Corporation.
+ *
+ * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
+ */
+
+#ifndef __LIBBPF_XSK_H
+#define __LIBBPF_XSK_H
+
+#include <stdio.h>
+#include <stdint.h>
+#include <linux/if_xdp.h>
+
+#include "libbpf.h"
+#include "libbpf_util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Do not access these members directly. Use the functions below. */
+#define DEFINE_XSK_RING(name) \
+struct name { \
+ __u32 cached_prod; \
+ __u32 cached_cons; \
+ __u32 mask; \
+ __u32 size; \
+ __u32 *producer; \
+ __u32 *consumer; \
+ void *ring; \
+}
+
+DEFINE_XSK_RING(xsk_ring_prod);
+DEFINE_XSK_RING(xsk_ring_cons);
+
+/* For a detailed explanation on the memory barriers associated with the
+ * ring, please take a look at net/xdp/xsk_queue.h.
+ */
+
+struct xsk_umem;
+struct xsk_socket;
+
+static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
+ __u32 idx)
+{
+ __u64 *addrs = (__u64 *)fill->ring;
+
+ return &addrs[idx & fill->mask];
+}
+
+static inline const __u64 *
+xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
+{
+ const __u64 *addrs = (const __u64 *)comp->ring;
+
+ return &addrs[idx & comp->mask];
+}
+
+static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
+ __u32 idx)
+{
+ struct xdp_desc *descs = (struct xdp_desc *)tx->ring;
+
+ return &descs[idx & tx->mask];
+}
+
+static inline const struct xdp_desc *
+xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
+{
+ const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;
+
+ return &descs[idx & rx->mask];
+}
+
+static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
+{
+ __u32 free_entries = r->cached_cons - r->cached_prod;
+
+ if (free_entries >= nb)
+ return free_entries;
+
+ /* Refresh the local tail pointer.
+ * cached_cons is r->size bigger than the real consumer pointer so
+ * that this addition can be avoided in the more frequently
+ * executed code that computes free_entries at the beginning of
+ * this function. Without this optimization it would have been
+ * free_entries = r->cached_cons - r->cached_prod + r->size.
+ */
+ r->cached_cons = *r->consumer + r->size;
+
+ return r->cached_cons - r->cached_prod;
+}
+
+static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
+{
+ __u32 entries = r->cached_prod - r->cached_cons;
+
+ if (entries == 0) {
+ r->cached_prod = *r->producer;
+ entries = r->cached_prod - r->cached_cons;
+ }
+
+ return (entries > nb) ? nb : entries;
+}
+
+static inline size_t xsk_ring_prod__reserve(struct xsk_ring_prod *prod,
+ size_t nb, __u32 *idx)
+{
+ if (xsk_prod_nb_free(prod, nb) < nb)
+ return 0;
+
+ *idx = prod->cached_prod;
+ prod->cached_prod += nb;
+
+ return nb;
+}
+
+static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, size_t nb)
+{
+ /* Make sure everything has been written to the ring before indicating
+ * this to the kernel by writing the producer pointer.
+ */
+ libbpf_smp_wmb();
+
+ *prod->producer += nb;
+}
+
+static inline size_t xsk_ring_cons__peek(struct xsk_ring_cons *cons,
+ size_t nb, __u32 *idx)
+{
+ size_t entries = xsk_cons_nb_avail(cons, nb);
+
+ if (entries > 0) {
+ /* Make sure we do not speculatively read the data before
+ * we have received the packet buffers from the ring.
+ */
+ libbpf_smp_rmb();
+
+ *idx = cons->cached_cons;
+ cons->cached_cons += entries;
+ }
+
+ return entries;
+}
+
+static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
+{
+ /* Make sure data has been read before indicating we are done
+ * with the entries by updating the consumer pointer.
+ */
+ libbpf_smp_rwmb();
+
+ *cons->consumer += nb;
+}
+
+static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
+{
+ return &((char *)umem_area)[addr];
+}
+
+LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
+LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
+
+#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
+#define XSK_RING_PROD__DEFAULT_NUM_DESCS 2048
+#define XSK_UMEM__DEFAULT_FRAME_SHIFT 11 /* 2048 bytes */
+#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
+#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
+
+struct xsk_umem_config {
+ __u32 fill_size;
+ __u32 comp_size;
+ __u32 frame_size;
+ __u32 frame_headroom;
+};
+
+/* Flags for the libbpf_flags field. */
+#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)
+
+struct xsk_socket_config {
+ __u32 rx_size;
+ __u32 tx_size;
+ __u32 libbpf_flags;
+ __u32 xdp_flags;
+ __u16 bind_flags;
+};
+
+/* Set config to NULL to get the default configuration. */
+LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
+ const char *ifname, __u32 queue_id,
+ struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *config);
+
+/* Returns 0 for success and -EBUSY if the umem is still in use. */
+LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
+LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_XSK_H */
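
The header above only declares the data path, so here is a minimal sketch of how a caller might drive it for a single RX queue, built solely from the functions and defaults declared in xsk.h. It is not part of the patch: the interface name "eth0", queue id 0, the NUM_FRAMES constant, the batch size of 64 and the local include path are assumptions, and error handling, cleanup on partial failure and the required privileges are glossed over.

/* Sketch: single-queue AF_XDP receiver using only the API above.
 * "eth0", queue 0, NUM_FRAMES and the batch size are assumptions.
 */
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "xsk.h"

#define NUM_FRAMES 4096	/* illustrative; not defined by the library */

int main(void)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_umem *umem;
	struct xsk_socket *xsk;
	struct pollfd pfd;
	void *bufs;
	size_t i, n;
	__u32 idx;

	/* Page-aligned buffer area that backs the umem frames. */
	if (posix_memalign(&bufs, getpagesize(),
			   NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
		return 1;

	/* NULL configs select the default ring sizes and frame layout. */
	if (xsk_umem__create(&umem, bufs,
			     NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
			     &fill, &comp, NULL))
		return 1;

	/* This version requires both an RX and a TX ring to be supplied. */
	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
		return 1;

	/* Hand frame addresses to the kernel through the fill ring. */
	n = xsk_ring_prod__reserve(&fill, XSK_RING_PROD__DEFAULT_NUM_DESCS,
				   &idx);
	for (i = 0; i < n; i++)
		*xsk_ring_prod__fill_addr(&fill, idx++) =
			i * XSK_UMEM__DEFAULT_FRAME_SIZE;
	xsk_ring_prod__submit(&fill, n);

	/* Receive one batch: wait, peek, touch the payload, release. */
	pfd.fd = xsk_socket__fd(xsk);
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);

	n = xsk_ring_cons__peek(&rx, 64, &idx);
	for (i = 0; i < n; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&rx, idx++);
		void *pkt = xsk_umem__get_data(bufs, desc->addr);

		printf("%u-byte packet at %p\n", desc->len, pkt);
	}
	xsk_ring_cons__release(&rx, n);

	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);
	free(bufs);
	return 0;
}

Passing NULL for both config pointers picks XSK_RING_*__DEFAULT_NUM_DESCS and XSK_UMEM__DEFAULT_FRAME_SIZE, and leaves XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD clear so the library loads its default XDP redirect program on the interface.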
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index a88bd507091e..ac37022e9486 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
*
* Copied from lib/find_bit.c to tools/lib/find_bit.c
@@ -11,11 +12,6 @@
*
* Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
* size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index d640a9761f09..a81d91d4fc78 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -45,6 +45,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
void lockdep_reset_lock(struct lockdep_map *lock);
+void lockdep_register_key(struct lock_class_key *key);
+void lockdep_unregister_key(struct lock_class_key *key);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index 2073d4e1f2f0..783dd0df06f9 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -7,6 +7,7 @@
struct liblockdep_pthread_mutex {
pthread_mutex_t mutex;
+ struct lock_class_key key;
struct lockdep_map dep_map;
};
@@ -27,11 +28,10 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
return pthread_mutex_init(&lock->mutex, __mutexattr);
}
-#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
-({ \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key, (mutexattr)); \
+#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
+({ \
+ lockdep_register_key(&(mutex)->key); \
+ __mutex_init((mutex), #mutex, &(mutex)->key, (mutexattr)); \
})
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
@@ -55,6 +55,7 @@ static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *l
static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
lockdep_reset_lock(&lock->dep_map);
+ lockdep_unregister_key(&lock->key);
return pthread_mutex_destroy(&lock->mutex);
}
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index c8fbd0306960..11f425662b43 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -11,7 +11,7 @@ find tests -name '*.c' | sort | while read -r i; do
testname=$(basename "$i" .c)
echo -ne "$testname... "
if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
- timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
+ timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -24,7 +24,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
timeout 1 ./lockdep "tests/$testname" 2>&1 |
- "tests/${testname}.sh"; then
+ /bin/bash "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
@@ -37,7 +37,7 @@ find tests -name '*.c' | sort | while read -r i; do
echo -ne "(PRELOAD + Valgrind) $testname... "
if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
{ timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
- "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
+ /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
echo "PASSED!"
else
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
index 623313f54720..543789bc3e37 100644
--- a/tools/lib/lockdep/tests/ABBA.c
+++ b/tools/lib/lockdep/tests/ABBA.c
@@ -14,4 +14,13 @@ void main(void)
pthread_mutex_destroy(&b);
pthread_mutex_destroy(&a);
+
+ pthread_mutex_init(&a, NULL);
+ pthread_mutex_init(&b, NULL);
+
+ LOCK_UNLOCK_2(a, b);
+ LOCK_UNLOCK_2(b, a);
+
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 17c2b596f043..804f145e3113 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -1,27 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/lib/rbtree.c
*/
#include <linux/rbtree_augmented.h>
+#include <linux/export.h>
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
@@ -43,6 +32,30 @@
* parentheses and have some accompanying text comment.
*/
+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
static inline void rb_set_black(struct rb_node *rb)
{
rb->__rb_parent_color |= RB_BLACK;
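
To make the lockless-lookup notes above concrete, the following is a hedged sketch of the kind of tolerant reader those WRITE_ONCE() stores permit. The 'struct item' container and its 'key' field are assumptions of the sketch, not kernel API, and as the notes say such a walk may still miss nodes that are being rotated concurrently.

/* Sketch only: a tolerant lockless walk relying on the WRITE_ONCE()
 * store discipline described above. 'struct item' is an assumed
 * caller-side container, not part of the rbtree API.
 */
struct item {
	struct rb_node node;
	unsigned long key;
};

static struct item *lockless_lookup(struct rb_root *root, unsigned long key)
{
	struct rb_node *n = READ_ONCE(root->rb_node);

	while (n) {
		struct item *it = rb_entry(n, struct item, node);

		if (key < it->key)
			n = READ_ONCE(n->rb_left);
		else if (key > it->key)
			n = READ_ONCE(n->rb_right);
		else
			return it;	/* a miss does not prove absence */
	}
	return NULL;
}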
@@ -70,22 +83,35 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
static __always_inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+ if (newleft)
+ *leftmost = node;
+
while (true) {
/*
- * Loop invariant: node is red
- *
- * If there is a black parent, we are done.
- * Otherwise, take some corrective action as we don't
- * want a red root or two consecutive red nodes.
+ * Loop invariant: node is red.
*/
- if (!parent) {
+ if (unlikely(!parent)) {
+ /*
+ * The inserted node is root. Either this is the
+ * first node, or we recursed at Case 1 below and
+ * are no longer violating 4).
+ */
rb_set_parent_color(node, NULL, RB_BLACK);
break;
- } else if (rb_is_black(parent))
+ }
+
+ /*
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as,
+ * per 4), we don't want a red root or two
+ * consecutive red nodes.
+ */
+ if (rb_is_black(parent))
break;
gparent = rb_red_parent(parent);
@@ -94,7 +120,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
if (parent != tmp) { /* parent == gparent->rb_left */
if (tmp && rb_is_red(tmp)) {
/*
- * Case 1 - color flips
+ * Case 1 - node's uncle is red (color flips).
*
* G g
* / \ / \
@@ -117,7 +143,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_right;
if (node == tmp) {
/*
- * Case 2 - left rotate at parent
+ * Case 2 - node's uncle is black and node is
+ * the parent's right child (left rotate at parent).
*
* G G
* / \ / \
@@ -128,8 +155,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* This still leaves us in violation of 4), the
* continuation into Case 3 will fix that.
*/
- parent->rb_right = tmp = node->rb_left;
- node->rb_left = parent;
+ tmp = node->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp);
+ WRITE_ONCE(node->rb_left, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -140,7 +168,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/*
- * Case 3 - right rotate at gparent
+ * Case 3 - node's uncle is black and node is
+ * the parent's left child (right rotate at gparent).
*
* G P
* / \ / \
@@ -148,8 +177,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
* / \
* n U
*/
- gparent->rb_left = tmp; /* == parent->rb_right */
- parent->rb_right = gparent;
+ WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+ WRITE_ONCE(parent->rb_right, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -170,8 +199,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
tmp = parent->rb_left;
if (node == tmp) {
/* Case 2 - right rotate at parent */
- parent->rb_left = tmp = node->rb_right;
- node->rb_right = parent;
+ tmp = node->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp);
+ WRITE_ONCE(node->rb_right, parent);
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
@@ -182,8 +212,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
}
/* Case 3 - left rotate at gparent */
- gparent->rb_right = tmp; /* == parent->rb_left */
- parent->rb_left = gparent;
+ WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+ WRITE_ONCE(parent->rb_left, gparent);
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
@@ -223,8 +253,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* Sl Sr N Sl
*/
- parent->rb_right = tmp1 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp1 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp1);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -268,15 +299,31 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
*
* (p) (p)
* / \ / \
- * N S --> N Sl
+ * N S --> N sl
* / \ \
- * sl Sr s
+ * sl Sr S
* \
* Sr
+ *
+ * Note: p might be red, and then both
+ * p and sl are red after rotation (which
+ * breaks property 4). This is fixed in
+ * Case 4 (in __rb_rotate_set_parents(),
+ * which sets sl to the color of p
+ * and sets p to RB_BLACK).
+ *
+ * (p) (sl)
+ * / \ / \
+ * N sl --> P S
+ * \ / \
+ * S N Sr
+ * \
+ * Sr
*/
- sibling->rb_left = tmp1 = tmp2->rb_right;
- tmp2->rb_right = sibling;
- parent->rb_right = tmp2;
+ tmp1 = tmp2->rb_right;
+ WRITE_ONCE(sibling->rb_left, tmp1);
+ WRITE_ONCE(tmp2->rb_right, sibling);
+ WRITE_ONCE(parent->rb_right, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -296,8 +343,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
* / \ / \
* (sl) sr N (sl)
*/
- parent->rb_right = tmp2 = sibling->rb_left;
- sibling->rb_left = parent;
+ tmp2 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp2);
+ WRITE_ONCE(sibling->rb_left, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -309,8 +357,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
sibling = parent->rb_left;
if (rb_is_red(sibling)) {
/* Case 1 - right rotate at parent */
- parent->rb_left = tmp1 = sibling->rb_right;
- sibling->rb_right = parent;
+ tmp1 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp1);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
@@ -334,10 +383,11 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
}
break;
}
- /* Case 3 - right rotate at sibling */
- sibling->rb_right = tmp1 = tmp2->rb_left;
- tmp2->rb_left = sibling;
- parent->rb_left = tmp2;
+ /* Case 3 - left rotate at sibling */
+ tmp1 = tmp2->rb_left;
+ WRITE_ONCE(sibling->rb_right, tmp1);
+ WRITE_ONCE(tmp2->rb_left, sibling);
+ WRITE_ONCE(parent->rb_left, tmp2);
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
@@ -345,9 +395,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root,
tmp1 = sibling;
sibling = tmp2;
}
- /* Case 4 - left rotate at parent + color flips */
- parent->rb_left = tmp2 = sibling->rb_right;
- sibling->rb_right = parent;
+ /* Case 4 - right rotate at parent + color flips */
+ tmp2 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp2);
+ WRITE_ONCE(sibling->rb_right, parent);
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
@@ -378,22 +429,41 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
- __rb_insert(node, root, dummy_rotate);
+ __rb_insert(node, root, false, NULL, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ rebalance = __rb_erase_augmented(node, root,
+ NULL, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
+void rb_insert_color_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool leftmost)
+{
+ __rb_insert(node, &root->rb_root, leftmost,
+ &root->rb_leftmost, dummy_rotate);
+}
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+
/*
* Augmented rbtree manipulation functions.
*
@@ -402,9 +472,10 @@ void rb_erase(struct rb_node *node, struct rb_root *root)
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
- __rb_insert(node, root, augment_rotate);
+ __rb_insert(node, root, newleft, leftmost, augment_rotate);
}
/*
@@ -498,15 +569,24 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
{
struct rb_node *parent = rb_parent(victim);
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+
/* Set the surrounding nodes to point to the replacement */
- __rb_change_child(victim, new, parent, root);
if (victim->rb_left)
rb_set_parent(victim->rb_left, new);
if (victim->rb_right)
rb_set_parent(victim->rb_right, new);
+ __rb_change_child(victim, new, parent, root);
+}
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
+void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
+ struct rb_root_cached *root)
+{
+ rb_replace_node(victim, new, &root->rb_root);
+
+ if (root->rb_leftmost == victim)
+ root->rb_leftmost = new;
}
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
diff --git a/tools/lib/traceevent/Documentation/Makefile b/tools/lib/traceevent/Documentation/Makefile
new file mode 100644
index 000000000000..aa72ab96c3c1
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/Makefile
@@ -0,0 +1,207 @@
+include ../../../scripts/Makefile.include
+include ../../../scripts/utilities.mak
+
+# This Makefile and manpage XSL files were taken from tools/perf/Documentation
+# and modified for libtraceevent.
+
+MAN3_TXT= \
+ $(wildcard libtraceevent-*.txt) \
+ libtraceevent.txt
+
+MAN_TXT = $(MAN3_TXT)
+_MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
+_MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
+_DOC_MAN3=$(patsubst %.txt,%.3,$(MAN3_TXT))
+
+MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML))
+MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML))
+DOC_MAN3=$(addprefix $(OUTPUT),$(_DOC_MAN3))
+
+# Make the path relative to DESTDIR, not prefix
+ifndef DESTDIR
+prefix?=$(HOME)
+endif
+bindir?=$(prefix)/bin
+htmldir?=$(prefix)/share/doc/libtraceevent-doc
+pdfdir?=$(prefix)/share/doc/libtraceevent-doc
+mandir?=$(prefix)/share/man
+man3dir=$(mandir)/man3
+
+ASCIIDOC=asciidoc
+ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
+ASCIIDOC_HTML = xhtml11
+MANPAGE_XSL = manpage-normal.xsl
+XMLTO_EXTRA =
+INSTALL?=install
+RM ?= rm -f
+
+ifdef USE_ASCIIDOCTOR
+ASCIIDOC = asciidoctor
+ASCIIDOC_EXTRA = -a compat-mode
+ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
+ASCIIDOC_EXTRA += -a mansource="libtraceevent" -a manmanual="libtraceevent Manual"
+ASCIIDOC_HTML = xhtml5
+endif
+
+XMLTO=xmlto
+
+_tmp_tool_path := $(call get-executable,$(ASCIIDOC))
+ifeq ($(_tmp_tool_path),)
+ missing_tools = $(ASCIIDOC)
+endif
+
+ifndef USE_ASCIIDOCTOR
+_tmp_tool_path := $(call get-executable,$(XMLTO))
+ifeq ($(_tmp_tool_path),)
+ missing_tools += $(XMLTO)
+endif
+endif
+
+#
+# For asciidoc ...
+# -7.1.2, no extra settings are needed.
+# 8.0-, set ASCIIDOC8.
+#
+
+#
+# For docbook-xsl ...
+# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
+# 1.69.0, no extra settings are needed?
+# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP?
+# 1.71.1, no extra settings are needed?
+# 1.72.0, set DOCBOOK_XSL_172.
+# 1.73.0-, set ASCIIDOC_NO_ROFF
+#
+
+#
+# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
+# of 'the ".ft C" problem' in your generated manpages, and you
+# instead ended up with weird characters around callouts, try
+# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
+#
+
+ifdef ASCIIDOC8
+ASCIIDOC_EXTRA += -a asciidoc7compatible
+endif
+ifdef DOCBOOK_XSL_172
+ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
+MANPAGE_XSL = manpage-1.72.xsl
+else
+ ifdef ASCIIDOC_NO_ROFF
+ # docbook-xsl after 1.72 needs the regular XSL, but will not
+ # pass-thru raw roff codes from asciidoc.conf, so turn them off.
+ ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
+ endif
+endif
+ifdef MAN_BOLD_LITERAL
+XMLTO_EXTRA += -m manpage-bold-literal.xsl
+endif
+ifdef DOCBOOK_SUPPRESS_SP
+XMLTO_EXTRA += -m manpage-suppress-sp.xsl
+endif
+
+SHELL_PATH ?= $(SHELL)
+# Shell quote;
+SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
+
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+export DESTDIR DESTDIR_SQ
+
+#
+# Please note that there is a minor bug in asciidoc.
+# The version after 6.0.3 _will_ include the patch found here:
+# http://marc.theaimsgroup.com/?l=libtraceevent&m=111558757202243&w=2
+#
+# Until that version is released you may have to apply the patch
+# yourself - yes, all 6 characters of it!
+#
+QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
+QUIET_SUBDIR1 =
+
+ifneq ($(findstring $(MAKEFLAGS),w),w)
+PRINT_DIR = --no-print-directory
+else # "make -w"
+NO_SUBDIR = :
+endif
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ifneq ($(V),1)
+ QUIET_ASCIIDOC = @echo ' ASCIIDOC '$@;
+ QUIET_XMLTO = @echo ' XMLTO '$@;
+ QUIET_SUBDIR0 = +@subdir=
+ QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
+ echo ' SUBDIR ' $$subdir; \
+ $(MAKE) $(PRINT_DIR) -C $$subdir
+ export V
+endif
+endif
+
+all: html man
+
+man: man3
+man3: $(DOC_MAN3)
+
+html: $(MAN_HTML)
+
+$(MAN_HTML) $(DOC_MAN3): asciidoc.conf
+
+install: install-man
+
+check-man-tools:
+ifdef missing_tools
+ $(error "You need to install $(missing_tools) for man pages")
+endif
+
+do-install-man: man
+ $(call QUIET_INSTALL, Documentation-man) \
+ $(INSTALL) -d -m 755 $(DESTDIR)$(man3dir); \
+ $(INSTALL) -m 644 $(DOC_MAN3) $(DESTDIR)$(man3dir);
+
+install-man: check-man-tools man do-install-man
+
+uninstall: uninstall-man
+
+uninstall-man:
+ $(call QUIET_UNINST, Documentation-man) \
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man3dir)/,$(DOC_MAN3))
+
+
+ifdef missing_tools
+ DO_INSTALL_MAN = $(warning Please install $(missing_tools) to have the man pages installed)
+else
+ DO_INSTALL_MAN = do-install-man
+endif
+
+CLEAN_FILES = \
+ $(MAN_XML) $(addsuffix +,$(MAN_XML)) \
+ $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \
+ $(DOC_MAN3) *.3
+
+clean:
+ $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
+
+ifdef USE_ASCIIDOCTOR
+$(OUTPUT)%.3 : $(OUTPUT)%.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ $(ASCIIDOC) -b manpage -d manpage \
+ $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+ mv $@+ $@
+endif
+
+$(OUTPUT)%.3 : $(OUTPUT)%.xml
+ $(QUIET_XMLTO)$(RM) $@ && \
+ $(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+
+$(OUTPUT)%.xml : %.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ $(ASCIIDOC) -b docbook -d manpage \
+ $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+ mv $@+ $@
+
+$(MAN_HTML): $(OUTPUT)%.html : %.txt
+ $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
+ $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
+ $(ASCIIDOC_EXTRA) -aperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
+ mv $@+ $@
diff --git a/tools/lib/traceevent/Documentation/asciidoc.conf b/tools/lib/traceevent/Documentation/asciidoc.conf
new file mode 100644
index 000000000000..07595717f06e
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/asciidoc.conf
@@ -0,0 +1,120 @@
+## linktep: macro
+#
+# Usage: linktep:command[manpage-section]
+#
+# Note, {0} is the manpage section, while {target} is the command.
+#
+# Show TEP link as: <command>(<section>); if section is defined, else just show
+# the command.
+
+[macros]
+(?su)[\\]?(?P<name>linktep):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
+
+[attributes]
+asterisk=&#42;
+plus=&#43;
+caret=&#94;
+startsb=&#91;
+endsb=&#93;
+tilde=&#126;
+
+ifdef::backend-docbook[]
+[linktep-inlinemacro]
+{0%{target}}
+{0#<citerefentry>}
+{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
+{0#</citerefentry>}
+endif::backend-docbook[]
+
+ifdef::backend-docbook[]
+ifndef::tep-asciidoc-no-roff[]
+# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
+# v1.72 breaks with this because it replaces dots not in roff requests.
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+ifdef::doctype-manpage[]
+&#10;.ft C&#10;
+endif::doctype-manpage[]
+|
+ifdef::doctype-manpage[]
+&#10;.ft&#10;
+endif::doctype-manpage[]
+</literallayout>
+{title#}</example>
+endif::tep-asciidoc-no-roff[]
+
+ifdef::tep-asciidoc-no-roff[]
+ifdef::doctype-manpage[]
+# The following two small workarounds insert a simple paragraph after screen
+[listingblock]
+<example><title>{title}</title>
+<literallayout>
+|
+</literallayout><simpara></simpara>
+{title#}</example>
+
+[verseblock]
+<formalpara{id? id="{id}"}><title>{title}</title><para>
+{title%}<literallayout{id? id="{id}"}>
+{title#}<literallayout>
+|
+</literallayout>
+{title#}</para></formalpara>
+{title%}<simpara></simpara>
+endif::doctype-manpage[]
+endif::tep-asciidoc-no-roff[]
+endif::backend-docbook[]
+
+ifdef::doctype-manpage[]
+ifdef::backend-docbook[]
+[header]
+template::[header-declarations]
+<refentry>
+<refmeta>
+<refentrytitle>{mantitle}</refentrytitle>
+<manvolnum>{manvolnum}</manvolnum>
+<refmiscinfo class="source">libtraceevent</refmiscinfo>
+<refmiscinfo class="version">{libtraceevent_version}</refmiscinfo>
+<refmiscinfo class="manual">libtraceevent Manual</refmiscinfo>
+</refmeta>
+<refnamediv>
+ <refname>{manname1}</refname>
+ <refname>{manname2}</refname>
+ <refname>{manname3}</refname>
+ <refname>{manname4}</refname>
+ <refname>{manname5}</refname>
+ <refname>{manname6}</refname>
+ <refname>{manname7}</refname>
+ <refname>{manname8}</refname>
+ <refname>{manname9}</refname>
+ <refname>{manname10}</refname>
+ <refname>{manname11}</refname>
+ <refname>{manname12}</refname>
+ <refname>{manname13}</refname>
+ <refname>{manname14}</refname>
+ <refname>{manname15}</refname>
+ <refname>{manname16}</refname>
+ <refname>{manname17}</refname>
+ <refname>{manname18}</refname>
+ <refname>{manname19}</refname>
+ <refname>{manname20}</refname>
+ <refname>{manname21}</refname>
+ <refname>{manname22}</refname>
+ <refname>{manname23}</refname>
+ <refname>{manname24}</refname>
+ <refname>{manname25}</refname>
+ <refname>{manname26}</refname>
+ <refname>{manname27}</refname>
+ <refname>{manname28}</refname>
+ <refname>{manname29}</refname>
+ <refname>{manname30}</refname>
+ <refpurpose>{manpurpose}</refpurpose>
+</refnamediv>
+endif::backend-docbook[]
+endif::doctype-manpage[]
+
+ifdef::backend-xhtml11[]
+[linktep-inlinemacro]
+<a href="{target}.html">{target}{0?({0})}</a>
+endif::backend-xhtml11[]
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-commands.txt b/tools/lib/traceevent/Documentation/libtraceevent-commands.txt
new file mode 100644
index 000000000000..bec552001f8e
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-commands.txt
@@ -0,0 +1,153 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_comm, tep_override_comm, tep_is_pid_registered,
+tep_data_comm_from_pid, tep_data_pid_from_comm, tep_cmdline_pid -
+Manage pid to process name mappings.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
+const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_pevent_, int _pid_);
+struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_pevent_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
+int *tep_cmdline_pid*(struct tep_handle pass:[*]_pevent_, struct cmdline pass:[*]_cmdline_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to handle the mapping between pid and process name.
+The library builds a cache of these mappings, which is used to display the name
+of the process, instead of its pid. This information can be retrieved from
+tracefs/saved_cmdlines file.
+
+The _tep_register_comm()_ function registers a _pid_ / process name mapping.
+If a command with the same _pid_ is already registered, an error is returned.
+The _pid_ argument is the process ID, the _comm_ argument is the process name,
+_tep_ is the event context. The _comm_ is duplicated internally.
+
+The _tep_override_comm()_ function registers a _pid_ / process name mapping.
+If a process with the same pid is already registered, the process name string is
+updated with the new one. The _pid_ argument is the process ID, the _comm_
+argument is the process name, _tep_ is the event context. The _comm_ is
+duplicated internally.
+
+The _tep_is_pid_registered()_ function checks if a pid has a process name
+mapping registered. The _pid_ argument is the process ID, _tep_ is the event
+context.
+
+The _tep_data_comm_from_pid()_ function returns the process name for a given
+pid. The _pid_ argument is the process ID, _tep_ is the event context.
+The returned string should not be freed, but will be freed when the _tep_
+handler is closed.
+
+The _tep_data_pid_from_comm()_ function returns a pid for a given process name.
+The _comm_ argument is the process name, _tep_ is the event context.
+The argument _next_ is the cmdline structure to search for the next pid.
+As there may be more than one pid for a given process, the result of this call
+can be passed back into a recurring call in the _next_ parameter, to search for
+the next pid. If _next_ is NULL, it will return the first pid associated with
+the _comm_. The function performs a linear search, so it may be slow.
+
+The _tep_cmdline_pid()_ function returns the pid associated with a given
+_cmdline_. The _tep_ argument is the event context.
+
+RETURN VALUE
+------------
+_tep_register_comm()_ function returns 0 on success. In case of an error -1 is
+returned and errno is set to indicate the cause of the problem: ENOMEM, if there
+is not enough memory to duplicate the _comm_ or EEXIST if a mapping for this
+_pid_ is already registered.
+
+_tep_override_comm()_ function returns 0 on success. In case of an error -1 is
+returned and errno is set to indicate the cause of the problem: ENOMEM, if there
+is not enough memory to duplicate the _comm_.
+
+_tep_is_pid_registered()_ function returns true if the _pid_ has a process name
+mapped to it, false otherwise.
+
+_tep_data_comm_from_pid()_ function returns the process name as string, or the
+string "<...>" if there is no mapping for the given pid.
+
+_tep_data_pid_from_comm()_ function returns a pointer to a struct cmdline that
+holds a pid for a given process, or NULL if none is found. This result can be
+passed back into a recurring call as the _next_ parameter of the function.
+
+_tep_cmdline_pid()_ function returns the pid for the given cmdline. If _cmdline_
+is NULL, then -1 is returned.
+
+EXAMPLE
+-------
+The following example registers pid for command "ls", in context of event _tep_
+and performs various searches for pid / process name mappings:
+[source,c]
+--
+#include <event-parse.h>
+...
+int ret;
+int ls_pid = 1021;
+struct tep_handle *tep = tep_alloc();
+...
+ ret = tep_register_comm(tep, "ls", ls_pid);
+ if (ret != 0 && errno == EEXIST)
+ ret = tep_override_comm(tep, "ls", ls_pid);
+ if (ret != 0) {
+ /* Failed to register pid / command mapping */
+ }
+...
+ if (tep_is_pid_registered(tep, ls_pid) == 0) {
+ /* Command mapping for ls_pid is not registered */
+ }
+...
+ const char *comm = tep_data_comm_from_pid(tep, ls_pid);
+ if (comm) {
+ /* Found process name for ls_pid */
+ }
+...
+ int pid;
+ struct cmdline *cmd = tep_data_pid_from_comm(tep, "ls", NULL);
+ while (cmd) {
+ pid = tep_cmdline_pid(tep, cmd);
+ /* Found pid for process "ls" */
+ cmd = tep_data_pid_from_comm(tep, "ls", cmd);
+ }
+--
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt b/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt
new file mode 100644
index 000000000000..5ad70e43b752
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt
@@ -0,0 +1,77 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_cpus, tep_set_cpus - Get / set the number of CPUs that have a tracing
+buffer representing them. Note, a buffer may be empty.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
+void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_cpus()_ function gets the number of CPUs that have a tracing
+buffer representing them. The _tep_ argument is trace event parser context.
+
+The _tep_set_cpus()_ function sets the number of CPUs that have a tracing
+buffer representing them. The _tep_ argument is trace event parser context.
+The _cpus_ argument is the number of CPUs with tracing data.
+
+RETURN VALUE
+------------
+The _tep_get_cpus()_ function returns the number of CPUs that have tracing
+data recorded.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+ tep_set_cpus(tep, 5);
+...
+ printf("We have tracing data for %d CPUs", tep_get_cpus(tep));
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt
new file mode 100644
index 000000000000..e64851b6e189
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt
@@ -0,0 +1,78 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_read_number - Reads a number from raw data.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_read_number()_ function reads an integer from raw data, taking into
+account the endianness of the raw data and the current host. The _tep_ argument
+is the trace event parser context. The _ptr_ is a pointer to the raw data, where
+the integer is, and the _size_ is the size of the integer.
+
+RETURN VALUE
+------------
+The _tep_read_number()_ function returns the integer in the byte order of
+the current host. In case of an error, 0 is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void process_record(struct tep_record *record)
+{
+ int offset = 24;
+ int data = tep_read_number(tep, record->data + offset, 4);
+
+ /* Read the 4 bytes at the offset 24 of data as an integer */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt
new file mode 100644
index 000000000000..7bc062c9f76f
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt
@@ -0,0 +1,103 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_event, tep_find_event_by_name, tep_find_event_by_record -
+Find events by given key.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
+struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
+struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
+--
+
+DESCRIPTION
+-----------
+This set of functions can be used to search for an event, based on given
+criteria. All functions require a pointer to a _tep_, trace event parser
+context.
+
+The _tep_find_event()_ function searches for an event by given event _id_. The
+event ID is assigned dynamically and can be viewed in event's format file,
+"ID" field.
+
+The _tep_find_event_by_name()_ function searches for an event by given
+event _name_, under the system _sys_. If the _sys_ is NULL (not specified),
+the first event with _name_ is returned.
+
+The _tep_find_event_by_record()_ function searches for an event from a given
+_record_.
+
+RETURN VALUE
+------------
+All these functions return a pointer to the found event, or NULL if there is no
+such event.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event;
+
+event = tep_find_event(tep, 1857);
+if (event == NULL) {
+ /* There is no event with ID 1857 */
+}
+
+event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+if (event == NULL) {
+ /* There is no kvm_exit event, from kvm system */
+}
+
+void event_from_record(struct tep_record *record)
+{
+ struct tep_event *event = tep_find_event_by_record(tep, record);
+ if (event == NULL) {
+ /* There is no event from given record */
+ }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt
new file mode 100644
index 000000000000..6525092fc417
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt
@@ -0,0 +1,99 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_event, tep_get_first_event, tep_get_events_count - Access events.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
+struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
+int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_event()_ function returns a pointer to the event at the given _index_.
+The _tep_ argument is trace event parser context, the _index_ is the index of
+the requested event.
+
+The _tep_get_first_event()_ function returns a pointer to the first event.
+As events are stored in an array, this function returns the pointer to the
+beginning of the array. The _tep_ argument is trace event parser context.
+
+The _tep_get_events_count()_ function returns the number of the events
+in the array. The _tep_ argument is trace event parser context.
+
+RETURN VALUE
+------------
+The _tep_get_event()_ returns a pointer to the event located at _index_.
+NULL is returned in case of an error, if there are no events, or if _index_ is
+out of range.
+
+The _tep_get_first_event()_ returns a pointer to the first event. NULL is
+returned in case of error, or in case there are no events.
+
+The _tep_get_events_count()_ returns the number of the events. 0 is
+returned in case of error, or in case there are no events.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i, count = tep_get_events_count(tep);
+struct tep_event *event, *events = tep_get_first_event(tep);
+
+if (events == NULL) {
+ /* There are no events */
+} else {
+ for (i = 0; i < count; i++) {
+ event = (events+i);
+ /* process events[i] */
+ }
+
+ /* Get the last event */
+ event = tep_get_event(tep, count-1);
+}
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt
new file mode 100644
index 000000000000..fba350e5a4cb
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt
@@ -0,0 +1,122 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_list_events, tep_list_events_copy -
+Get list of events, sorted by given criteria.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_event_sort_type* {
+ _TEP_EVENT_SORT_ID_,
+ _TEP_EVENT_SORT_NAME_,
+ _TEP_EVENT_SORT_SYSTEM_,
+};
+
+struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+--
+
+DESCRIPTION
+-----------
+The _tep_list_events()_ function returns an array of pointers to the events,
+sorted by the _sort_type_ criteria. The last element of the array is NULL.
+The returned memory must not be freed, it is managed by the library.
+The function is not thread safe. The _tep_ argument is trace event parser
+context. The _sort_type_ argument is the required sort criteria:
+[verse]
+--
+ _TEP_EVENT_SORT_ID_ - sort by the event ID.
+ _TEP_EVENT_SORT_NAME_ - sort by the event (name, system, id) triplet.
+ _TEP_EVENT_SORT_SYSTEM_ - sort by the event (system, name, id) triplet.
+--
+
+The _tep_list_events_copy()_ function is a thread safe version of _tep_list_events()_.
+It has the same behavior, but the returned array is allocated internally and
+must be freed by the caller. Note that the content of the array must not be
+freed (see the EXAMPLE below).
+
+RETURN VALUE
+------------
+The _tep_list_events()_ function returns an array of pointers to events.
+In case of an error, NULL is returned. The returned array must not be freed,
+it is managed by the library.
+
+The _tep_list_events_copy()_ function returns an array of pointers to events.
+In case of an error, NULL is returned. The returned array must be freed by
+the caller.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i;
+struct tep_event **events;
+
+i = 0;
+events = tep_list_events(tep, TEP_EVENT_SORT_ID);
+if (events == NULL) {
+ /* Failed to get the events, sorted by ID */
+} else {
+	while (events[i]) {
+ /* walk through the list of the events, sorted by ID */
+ i++;
+ }
+}
+
+i = 0;
+events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
+if (events == NULL) {
+ /* Failed to get the events, sorted by name */
+} else {
+	while (events[i]) {
+ /* walk through the list of the events, sorted by name */
+ i++;
+ }
+ free(events);
+}
+
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt
new file mode 100644
index 000000000000..0896af5b9eff
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt
@@ -0,0 +1,118 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_common_field, tep_find_field, tep_find_any_field -
+Search for a field in an event.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+--
+
+DESCRIPTION
+-----------
+These functions search for a field with a given name in an event. The field
+returned can be used to find the field content from within a data record.
+
+The _tep_find_common_field()_ function searches for a common field with _name_
+in the _event_.
+
+The _tep_find_field()_ function searches for an event specific field with
+_name_ in the _event_.
+
+The _tep_find_any_field()_ function searches for any field with _name_ in the
+_event_.
+
+RETURN VALUE
+------------
+The _tep_find_common_field()_, _tep_find_field()_ and _tep_find_any_field()_
+functions return a pointer to the found field, or NULL in case there is no field
+with the requested name.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+void get_hrtimer_info(struct tep_handle *tep, struct tep_record *record)
+{
+ struct tep_format_field *field;
+ struct tep_event *event;
+ long long softexpires;
+ int mode;
+ int pid;
+
+ event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+
+ field = tep_find_common_field(event, "common_pid");
+ if (field == NULL) {
+ /* Cannot find "common_pid" field in the event */
+ } else {
+ /* Get pid from the data record */
+ pid = tep_read_number(tep, record->data + field->offset,
+ field->size);
+ }
+
+ field = tep_find_field(event, "softexpires");
+ if (field == NULL) {
+ /* Cannot find "softexpires" event specific field in the event */
+ } else {
+ /* Get softexpires parameter from the data record */
+ softexpires = tep_read_number(tep, record->data + field->offset,
+ field->size);
+ }
+
+ field = tep_find_any_field(event, "mode");
+ if (field == NULL) {
+ /* Cannot find "mode" field in the event */
+	} else {
+ /* Get mode parameter from the data record */
+ mode = tep_read_number(tep, record->data + field->offset,
+ field->size);
+ }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt
new file mode 100644
index 000000000000..6324f0d48aeb
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt
@@ -0,0 +1,122 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_any_field_val, tep_get_common_field_val, tep_get_field_val,
+tep_get_field_raw - Get value of a field.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to find a field and retrieve its value.
+
+The _tep_get_any_field_val()_ function searches in the _record_ for a field
+with _name_, part of the _event_. If the field is found, its value is stored in
+_val_. If there is an error and _err_ is not zero, then an error string is
+written into _s_.
+
+The _tep_get_common_field_val()_ function does the same as
+_tep_get_any_field_val()_, but searches only in the common fields. This works
+for any event as all events include the common fields.
+
+The _tep_get_field_val()_ function does the same as _tep_get_any_field_val()_,
+but searches only in the event specific fields.
+
+The _tep_get_field_raw()_ function searches in the _record_ for a field with
+_name_, part of the _event_. If the field is found, a pointer to where the field
+exists in the record's raw data is returned. The size of the data is stored in
+_len_. If there is an error and _err_ is not zero, then an error string is
+written into _s_.
+
+RETURN VALUE
+------------
+The _tep_get_any_field_val()_, _tep_get_common_field_val()_ and
+_tep_get_field_val()_ functions return 0 on success, or -1 in case of an error.
+
+The _tep_get_field_raw()_ function returns a pointer to the field's raw data, and
+places the length of this data in _len_. In case of an error, NULL is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+...
+void process_record(struct tep_record *record)
+{
+ int len;
+ char *comm;
+	struct tep_event *event;
+ unsigned long long val;
+
+	event = tep_find_event_by_record(tep, record);
+ if (event != NULL) {
+ if (tep_get_common_field_val(NULL, event, "common_type",
+ record, &val, 0) == 0) {
+ /* Got the value of common type field */
+ }
+ if (tep_get_field_val(NULL, event, "pid", record, &val, 0) == 0) {
+ /* Got the value of pid specific field */
+ }
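+
+		/*
+		 * A small addition to the original example: the same kind of
+		 * lookup with tep_get_any_field_val(), which searches both the
+		 * common and the event specific fields.
+		 */
+		if (tep_get_any_field_val(NULL, event, "common_pid",
+					  record, &val, 0) == 0) {
+			/* Got the value of the common_pid field */
+		}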
+ comm = tep_get_field_raw(NULL, event, "comm", record, &len, 0);
+ if (comm != NULL) {
+ /* Got a pointer to the comm event specific field */
+ }
+ }
+}
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences
+ related APIs. Trace sequences are used to allow a function to call
+ several other functions to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt
new file mode 100644
index 000000000000..9a9df98ac44d
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt
@@ -0,0 +1,126 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_print_field, tep_print_fields, tep_print_num_field, tep_print_func_field -
+Print the field content.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
+void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
+int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+--
+
+DESCRIPTION
+-----------
+These functions print a recorded field's data, according to the field's type.
+
+The _tep_print_field()_ function extracts the value of the _field_ from the
+recorded raw _data_ and prints it into _s_, according to the field's type.
+
+The _tep_print_fields()_ prints each field name followed by the record's field
+value according to the field's type:
+[verse]
+--
+"field1_name=field1_value field2_name=field2_value ..."
+--
+It iterates all fields of the _event_, and calls _tep_print_field()_ for each of
+them.
+
+The _tep_print_num_field()_ function prints a numeric field with a given format
+string. A search is performed in the _event_ for a field with _name_. If such a
+field is found, its value is extracted from the _record_ and printed in
+_s_, according to the given format string _fmt_. If the argument _err_ is
+non-zero and an error occurs, it is printed in _s_.
+
+The _tep_print_func_field()_ function prints a function field with a given format
+string. A search is performed in the _event_ for a field with _name_. If such a
+field is found, its value is extracted from the _record_. The value is assumed
+to be a function address, and a search is performed to find the name of this
+function. The function name (if found) and its address are printed in _s_,
+according to the given format string _fmt_. If the argument _err_ is non-zero
+and an error occurs, it is printed in _s_.
+
+RETURN VALUE
+------------
+The _tep_print_num_field()_ and _tep_print_func_field()_ functions return 1
+on success, -1 in case of an error or 0 if the print buffer _s_ is full.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct trace_seq seq;
+trace_seq_init(&seq);
+struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+...
+void process_record(struct tep_record *record)
+{
+ struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
+
+ trace_seq_reset(&seq);
+
+ /* Print the value of "common_pid" */
+ tep_print_field(&seq, record->data, field_pid);
+
+ /* Print all fields of the "hrtimer_start" event */
+ tep_print_fields(&seq, record->data, record->size, event);
+
+ /* Print the value of "expires" field with custom format string */
+ tep_print_num_field(&seq, " timer expires in %llu ", event, "expires", record, 0);
+
+ /* Print the address and the name of "function" field with custom format string */
+ tep_print_func_field(&seq, " timer function is %s ", event, "function", record, 0);
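+
+	/*
+	 * Assuming the trace_seq printing helper from trace-seq.h is
+	 * available, the accumulated text can then be emitted with:
+	 */
+	trace_seq_do_printf(&seq);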
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences related APIs.
+ Trace sequences are used to allow a function to call several other functions
+ to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt
new file mode 100644
index 000000000000..64e9e25d3fd9
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt
@@ -0,0 +1,81 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_read_number_field - Reads a number from raw data.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
+--
+
+DESCRIPTION
+-----------
+The _tep_read_number_field()_ function reads the value of the _field_ from the
+raw _data_ and stores it in _value_, converting it according to the endianness
+of the raw data and of the current machine.
+
+RETURN VALUE
+------------
+The _tep_read_number_field()_ function returns 0 in case of success, or -1 in
+case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
+...
+void process_record(struct tep_record *record)
+{
+ unsigned long long pid;
+ struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
+
+ if (tep_read_number_field(field_pid, record->data, &pid) != 0) {
+ /* Failed to get "common_pid" value */
+ }
+}
+...
+--
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-fields.txt b/tools/lib/traceevent/Documentation/libtraceevent-fields.txt
new file mode 100644
index 000000000000..1ccb531d5114
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-fields.txt
@@ -0,0 +1,105 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_event_common_fields, tep_event_fields - Get a list of fields for an event.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
+struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
+--
+
+DESCRIPTION
+-----------
+The _tep_event_common_fields()_ function returns an array of pointers to common
+fields for the _event_. The array is allocated in the function and must be freed
+by free(). The last element of the array is NULL.
+
+The _tep_event_fields()_ function returns an array of pointers to event specific
+fields for the _event_. The array is allocated in the function and must be freed
+by free(). The last element of the array is NULL.
+
+RETURN VALUE
+------------
+Both _tep_event_common_fields()_ and _tep_event_fields()_ functions return
+an array of pointers to tep_format_field structures in case of success, or
+NULL in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int i;
+struct tep_format_field **fields;
+struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
+if (event != NULL) {
+ fields = tep_event_common_fields(event);
+ if (fields != NULL) {
+ i = 0;
+ while (fields[i]) {
+ /*
+ walk through the list of the common fields
+ of the kvm_exit event
+ */
+ i++;
+ }
+ free(fields);
+ }
+ fields = tep_event_fields(event);
+ if (fields != NULL) {
+ i = 0;
+ while (fields[i]) {
+ /*
+ walk through the list of the event specific
+ fields of the kvm_exit event
+ */
+ i++;
+ }
+ free(fields);
+ }
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt
new file mode 100644
index 000000000000..f401ad311047
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt
@@ -0,0 +1,91 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_is_file_bigendian, tep_set_file_bigendian - Get / set the endianness of the
+raw data being accessed by the tep handler.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_endian* {
+ TEP_LITTLE_ENDIAN = 0,
+ TEP_BIG_ENDIAN
+};
+
+bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
+void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+
+--
+DESCRIPTION
+-----------
+The _tep_is_file_bigendian()_ function gets the endianness of the raw data
+being accessed by the tep handler. The _tep_ argument is trace event parser
+context.
+
+The _tep_set_file_bigendian()_ function sets the endianness of raw data being
+accessed by the tep handler. The _tep_ argument is trace event parser context.
+[verse]
+--
+The _endian_ argument is the endianness:
+ _TEP_LITTLE_ENDIAN_ - the raw data is in little endian format,
+ _TEP_BIG_ENDIAN_ - the raw data is in big endian format.
+--
+RETURN VALUE
+------------
+The _tep_is_file_bigendian()_ function returns true if the data is in big endian
+format, false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+ tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);
+...
+ if (tep_is_file_bigendian(tep)) {
+ /* The raw data is in big endian */
+ } else {
+ /* The raw data is in little endian */
+ }
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-filter.txt b/tools/lib/traceevent/Documentation/libtraceevent-filter.txt
new file mode 100644
index 000000000000..4a9962d8cb59
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-filter.txt
@@ -0,0 +1,209 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_filter_alloc, tep_filter_free, tep_filter_reset, tep_filter_make_string,
+tep_filter_copy, tep_filter_compare, tep_filter_match, tep_event_filtered,
+tep_filter_remove_event, tep_filter_strerror, tep_filter_add_filter_str -
+Event filter related APIs.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
+void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
+void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
+enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
+int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
+int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
+int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
+char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]buf, size_t _buflen_);
+--
+
+DESCRIPTION
+-----------
+Filters can be attached to traced events. They can be used to filter out various
+events when outputting them. Each event can be filtered based on its parameters,
+described in the event's format file. This set of functions can be used to
+create, delete, modify and attach event filters.
+
+The _tep_filter_alloc()_ function creates a new event filter. The _tep_ argument
+is the trace event parser context.
+
+The _tep_filter_free()_ function frees an event filter and all resources that it
+had used.
+
+The _tep_filter_reset()_ function removes all rules from an event filter and
+resets it.
+
+The _tep_filter_add_filter_str()_ function adds a new rule to the _filter_. The
+_filter_str_ argument is the filter string that contains the rule.
+
+The _tep_event_filtered()_ function checks whether the _filter_ contains a rule
+for the event with _event_id_.
+
+The _tep_filter_remove_event()_ function removes the rule for the event with
+_event_id_ from the _filter_.
+
+The _tep_filter_match()_ function tests if a _record_ matches the given _filter_.
+
+The _tep_filter_copy()_ function copies a _source_ filter into a _dest_ filter.
+
+The _tep_filter_compare()_ function compares two filters - _filter1_ and _filter2_.
+
+The _tep_filter_make_string()_ function constructs a string, displaying
+the _filter_ contents for given _event_id_.
+
+The _tep_filter_strerror()_ function copies the _filter_ error buffer into the
+given _buf_, which is _buflen_ bytes long. If the error buffer is empty, a
+string describing the error _err_ is copied into _buf_ instead.
+
+RETURN VALUE
+------------
+The _tep_filter_alloc()_ function returns a pointer to the newly created event
+filter, or NULL in case of an error.
+
+The _tep_filter_add_filter_str()_ function returns 0 if the rule was
+successfully added, or a negative error code otherwise. Use
+_tep_filter_strerror()_ to see the actual error message in case of an error.
+
+The _tep_event_filtered()_ function returns 1 if a filter is found for the given
+event, or 0 otherwise.
+
+The _tep_filter_remove_event()_ function returns 1 if the event was removed, or
+0 if the event was not found.
+
+The _tep_filter_match()_ function returns _tep_errno_, according to the result:
+[verse]
+--
+_pass:[TEP_ERRNO__FILTER_MATCH]_ - filter found for event, the record matches.
+_pass:[TEP_ERRNO__FILTER_MISS]_ - filter found for event, the record does not match.
+_pass:[TEP_ERRNO__FILTER_NOT_FOUND]_ - no filter found for record's event.
+_pass:[TEP_ERRNO__NO_FILTER]_ - no rules in the filter.
+--
+or any other _tep_errno_, if an error occurred during the test.
+
+The _tep_filter_copy()_ function returns 0 on success, or -1 if not all rules
+were copied.
+
+The _tep_filter_compare()_ function returns 1 if the two filters hold the same
+content, or 0 if they do not.
+
+The _tep_filter_make_string()_ function returns a string, which must be freed
+with free(), or NULL in case of an error.
+
+The _tep_filter_strerror()_ function returns 0 if message was filled
+successfully, or -1 in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char errstr[200];
+int ret;
+
+struct tep_event_filter *filter = tep_filter_alloc(tep);
+struct tep_event_filter *filter1 = tep_filter_alloc(tep);
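+/* This example rule uses the "system/event:expression" form */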
+ret = tep_filter_add_filter_str(filter, "sched/sched_wakeup:target_cpu==1");
+if (ret < 0) {
+ tep_filter_strerror(filter, ret, errstr, sizeof(errstr));
+ /* Failed to add a new rule to the filter, the error string is in errstr */
+}
+if (tep_filter_copy(filter1, filter) != 0) {
+ /* Failed to copy filter in filter1 */
+}
+...
+if (tep_filter_compare(filter, filter1) != 1) {
+ /* Both filters are different */
+}
+...
+void process_record(struct tep_handle *tep, struct tep_record *record)
+{
+ struct tep_event *event;
+ char *fstring;
+
+ event = tep_find_event_by_record(tep, record);
+
+ if (tep_event_filtered(filter, event->id) == 1) {
+ /* The event has filter */
+ fstring = tep_filter_make_string(filter, event->id);
+ if (fstring != NULL) {
+ /* The filter for the event is in fstring */
+ free(fstring);
+ }
+ }
+
+ switch (tep_filter_match(filter, record)) {
+ case TEP_ERRNO__FILTER_MATCH:
+ /* The filter matches the record */
+ break;
+ case TEP_ERRNO__FILTER_MISS:
+ /* The filter does not match the record */
+ break;
+ case TEP_ERRNO__FILTER_NOT_FOUND:
+ /* No filter found for record's event */
+ break;
+ case TEP_ERRNO__NO_FILTER:
+ /* There are no rules in the filter */
+		break;
+ default:
+ /* An error occurred during the test */
+ break;
+ }
+
+ if (tep_filter_remove_event(filter, event->id) == 1) {
+ /* The event was removed from the filter */
+ }
+}
+
+...
+tep_filter_reset(filter);
+...
+tep_filter_free(filter);
+tep_filter_free(filter1);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
new file mode 100644
index 000000000000..38bfea30a5f6
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
@@ -0,0 +1,183 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_function, tep_find_function_address, tep_set_function_resolver,
+tep_reset_function_resolver, tep_register_function, tep_register_print_string -
+function related tep APIs
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+typedef char pass:[*](*tep_func_resolver_t*)(void pass:[*]_priv_, unsigned long long pass:[*]_addrp_, char pass:[**]_modp_);
+int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
+void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
+const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
+int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
+--
+
+DESCRIPTION
+-----------
+Some tools may already have a way to resolve kernel functions. These APIs
+allow them to keep using it instead of duplicating all the entries internally.
+
+The _tep_func_resolver_t_ type is the prototype of the alternative kernel
+functions resolver. This function receives a pointer to its custom context
+(set with the _tep_set_function_resolver()_ call) and the address of a kernel
+function, which has to be resolved. In case of success, it should return
+the name of the function and its module (if any) in _modp_.
+
+The _tep_set_function_resolver()_ function registers _func_ as an alternative
+kernel functions resolver. The _tep_ argument is trace event parser context.
+The _priv_ argument is a custom context of the _func_ function. The function
+resolver is used by the APIs _tep_find_function()_,
+_tep_find_function_address()_, and _tep_print_func_field()_ to resolve
+a function address to a function name.
+
+The _tep_reset_function_resolver()_ function resets the kernel functions
+resolver to the default function. The _tep_ argument is trace event parser
+context.
+
+
+These APIs can be used to find a function's name and start address by a given
+address. The given address does not have to be exact; the function that
+contains it will be selected.
+
+The _tep_find_function()_ function returns the name of the function that contains
+the given address _addr_. The _tep_ argument is the trace event parser context.
+
+The _tep_find_function_address()_ function returns the start address of the
+function that contains the given address _addr_. The _addr_ does not have to be
+exact; the function that contains it will be selected. The _tep_ argument is
+the trace event parser context.
+
+The _tep_register_function()_ function registers a function name mapped to an
+address and (optional) module. This mapping is used in case the function tracer
+or events have a "%pF" or "%pS" parameter in their format strings. It is common to
+pass in the kallsyms function names with their corresponding addresses with this
+function. The _tep_ argument is the trace event parser context. The _name_ is
+the name of the function, the string is copied internally. The _addr_ is
+the start address of the function. The _mod_ is the kernel module
+the function may be in (NULL for none).
+
+The _tep_register_print_string()_ function registers a string by the address
+it was stored in the kernel. Some strings internal to the kernel with static
+address are passed to certain events. The "%s" in the event's format field
+which has an address needs to know what string would be at that address. The
+_tep_register_print_string()_ function supplies the parser with the mapping between
+kernel addresses and those strings. The _tep_ argument is the trace event parser
+context. The _fmt_ is the string to register, it is copied internally.
+The _addr_ is the address the string was located at.
+
+
+RETURN VALUE
+------------
+The _tep_set_function_resolver()_ function returns 0 in case of success, or -1
+in case of an error.
+
+The _tep_find_function()_ function returns the function name, or NULL in case
+it cannot be found.
+
+The _tep_find_function_address()_ function returns the function start address,
+or 0 in case it cannot be found.
+
+The _tep_register_function()_ function returns 0 in case of success. In case of
+an error -1 is returned, and errno is set to the appropriate error number.
+
+The _tep_register_print_string()_ function returns 0 in case of success. In case
+of an error -1 is returned, and errno is set to the appropriate error number.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *my_resolve_kernel_addr(void *context,
+ unsigned long long *addrp, char **modp)
+{
+ struct db *function_database = context;
+ struct symbol *sym = sql_lookup(function_database, *addrp);
+
+ if (!sym)
+ return NULL;
+
+ *modp = sym->module_name;
+ return sym->name;
+}
+
+void show_function(unsigned long long addr)
+{
+ unsigned long long fstart;
+ const char *fname;
+
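+	/* function_database is assumed to be initialized elsewhere in this example */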
+ if (tep_set_function_resolver(tep, my_resolve_kernel_addr,
+ function_database) != 0) {
+ /* failed to register my_resolve_kernel_addr */
+ }
+
+ /* These APIs use my_resolve_kernel_addr() to resolve the addr */
+ fname = tep_find_function(tep, addr);
+ fstart = tep_find_function_address(tep, addr);
+
+ /*
+ addr is in function named fname, starting at fstart address,
+ at offset (addr - fstart)
+ */
+
+ tep_reset_function_resolver(tep);
+
+}
+...
+ if (tep_register_function(tep, "kvm_exit",
+ (unsigned long long) 0x12345678, "kvm") != 0) {
+ /* Failed to register kvm_exit address mapping */
+ }
+...
+ if (tep_register_print_string(tep, "print string",
+ (unsigned long long) 0x87654321, NULL) != 0) {
+ /* Failed to register "print string" address mapping */
+ }
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt
new file mode 100644
index 000000000000..04840e244445
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt
@@ -0,0 +1,88 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_find_function, tep_find_function_address - Find function name / start address.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+--
+
+DESCRIPTION
+-----------
+These functions can be used to find a function's name and start address by a
+given address. The given address does not have to be exact; the function that
+contains it will be selected.
+
+The _tep_find_function()_ function returns the name of the function that contains
+the given address _addr_. The _tep_ argument is the trace event parser context.
+
+The _tep_find_function_address()_ function returns the start address of the
+function that contains the given address _addr_. The _addr_ does not have to be
+exact; the function that contains it will be selected. The _tep_ argument is the
+trace event parser context.
+
+RETURN VALUE
+------------
+The _tep_find_function()_ function returns the function name, or NULL in case
+it cannot be found.
+
+The _tep_find_function_address()_ function returns the function start address,
+or 0 in case it cannot be found.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void show_function(unsigned long long addr)
+{
+ const char *fname = tep_find_function(tep, addr);
+ unsigned long long fstart = tep_find_function_address(tep, addr);
+
+ /* addr is in function named fname, starting at fstart address, at offset (addr - fstart) */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
new file mode 100644
index 000000000000..8d568316847d
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
@@ -0,0 +1,101 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_alloc, tep_free, tep_ref, tep_unref, tep_ref_get - Create, destroy, manage
+references of trace event parser context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+struct tep_handle pass:[*]*tep_alloc*(void);
+void *tep_free*(struct tep_handle pass:[*]_tep_);
+void *tep_ref*(struct tep_handle pass:[*]_tep_);
+void *tep_unref*(struct tep_handle pass:[*]_tep_);
+int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+--
+
+DESCRIPTION
+-----------
+These are the main functions to create and destroy tep_handle - the main
+structure representing the trace event parser context. This context is used as
+the input parameter of most library APIs.
+
+The _tep_alloc()_ function allocates and initializes the tep context.
+
+The _tep_free()_ function will decrement the reference count of the _tep_ handler.
+When there are no more references, it will free the handler and clean up all
+the resources that it had used. The argument _tep_ is the pointer to the trace
+event parser context.
+
+The _tep_ref()_ function adds a reference to the _tep_ handler.
+
+The _tep_unref()_ function removes a reference from the _tep_ handler. When
+the last reference is removed, the _tep_ is destroyed, and all resources that
+it had used are cleaned up.
+
+The _tep_ref_get()_ function gets the current reference count of the _tep_ handler.
+
+RETURN VALUE
+------------
+_tep_alloc()_ returns a pointer to a newly created tep_handle structure.
+NULL is returned in case there is not enough free memory to allocate it.
+
+_tep_ref_get()_ returns the current reference count of _tep_.
+If _tep_ is NULL, 0 is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+
+...
+struct tep_handle *tep = tep_alloc();
+...
+int ref = tep_ref_get(tep);
+tep_ref(tep);
+if ((ref + 1) != tep_ref_get(tep)) {
+ /* Something wrong happened, the counter is not incremented by 1 */
+}
+tep_unref(tep);
+...
+tep_free(tep);
+...
+--
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt b/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt
new file mode 100644
index 000000000000..615d117dc39f
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt
@@ -0,0 +1,102 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_header_page_size, tep_get_header_timestamp_size, tep_is_old_format -
+Get the data stored in the header page, in kernel context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
+int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
+bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
+--
+DESCRIPTION
+-----------
+These functions retrieve information from kernel context, stored in tracefs
+events/header_page. Old kernels do not have header page info, so default values
+from user space context are used.
+
+The _tep_get_header_page_size()_ function returns the size of a long integer,
+in kernel context. The _tep_ argument is trace event parser context.
+This information is retrieved from tracefs events/header_page, "commit" field.
+
+The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
+in kernel context. The _tep_ argument is trace event parser context. This
+information is retrieved from tracefs events/header_page, "timestamp" field.
+
+The _tep_is_old_format()_ function returns true if the kernel predates
+the addition of events/header_page, otherwise it returns false.
+
+RETURN VALUE
+------------
+The _tep_get_header_page_size()_ function returns the size of a long integer,
+in bytes.
+
+The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
+in bytes.
+
+The _tep_is_old_format()_ function returns true if an old kernel, which has no
+events/header_page, was used to generate the tracing data. If the kernel is new,
+or _tep_ is NULL, false is returned.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+ int longsize;
+ int timesize;
+ bool old;
+
+ longsize = tep_get_header_page_size(tep);
+ timesize = tep_get_header_timestamp_size(tep);
+ old = tep_is_old_format(tep);
+
+	printf("%s kernel is used to generate the tracing data.\n",
+	       old ? "Old" : "New");
+ printf("The size of a long integer is %d bytes.\n", longsize);
+ printf("The timestamps size is %d bytes.\n", timesize);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt
new file mode 100644
index 000000000000..d5d375eb8d1e
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt
@@ -0,0 +1,104 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_is_bigendian, tep_is_local_bigendian, tep_set_local_bigendian - Get / set
+the endianness of the local machine.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_endian* {
+ TEP_LITTLE_ENDIAN = 0,
+ TEP_BIG_ENDIAN
+};
+
+int *tep_is_bigendian*(void);
+bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
+void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+--
+
+DESCRIPTION
+-----------
+
+The _tep_is_bigendian()_ function gets the endianness of the machine executing
+the function.
+
+The _tep_is_local_bigendian()_ function gets the endianness of the local
+machine, saved in the _tep_ handler. The _tep_ argument is the trace event
+parser context. This API is a bit faster than _tep_is_bigendian()_, as it
+returns the cached endianness of the local machine instead of checking it each time.
+
+The _tep_set_local_bigendian()_ function sets the endianness of the local
+machine in the _tep_ handler. The _tep_ argument is trace event parser context.
+The _endian_ argument is the endianness:
+[verse]
+--
+ _TEP_LITTLE_ENDIAN_ - the machine is little endian,
+ _TEP_BIG_ENDIAN_ - the machine is big endian.
+--
+
+RETURN VALUE
+------------
+The _tep_is_bigendian()_ function returns non-zero if the machine executing the
+code is big endian, and zero otherwise.
+
+The _tep_is_local_bigendian()_ function returns true, if the endianness of the
+local machine, saved in the _tep_ handler, is big endian, or false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+ if (tep_is_bigendian())
+ tep_set_local_bigendian(tep, TEP_BIG_ENDIAN);
+ else
+ tep_set_local_bigendian(tep, TEP_LITTLE_ENDIAN);
+...
+ if (tep_is_local_bigendian(tep))
+		printf("This machine you are running on is big endian\n");
+ else
+ printf("This machine you are running on is little endian\n");
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt
new file mode 100644
index 000000000000..01d78ea2519a
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt
@@ -0,0 +1,78 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_long_size, tep_set_long_size - Get / set the size of a long integer on
+the machine, where the trace is generated, in bytes
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
+void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_long_size()_ function returns the size of a long integer on the machine,
+where the trace is generated. The _tep_ argument is trace event parser context.
+
+The _tep_set_long_size()_ function sets the size of a long integer on the machine,
+where the trace is generated. The _tep_ argument is trace event parser context.
+The _long_size_ is the size of a long integer, in bytes.
+
+RETURN VALUE
+------------
+The _tep_get_long_size()_ function returns the size of a long integer on the machine,
+where the trace is generated, in bytes.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
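+/* e.g. the trace was recorded on a 32-bit machine, where a long is 4 bytes */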
+tep_set_long_size(tep, 4);
+...
+int long_size = tep_get_long_size(tep);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt
new file mode 100644
index 000000000000..452c0cfa1822
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt
@@ -0,0 +1,82 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_get_page_size, tep_set_page_size - Get / set the size of a memory page on
+the machine, where the trace is generated
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
+void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_get_page_size()_ function returns the size of a memory page on
+the machine, where the trace is generated. The _tep_ argument is trace
+event parser context.
+
+The _tep_set_page_size()_ function stores in the _tep_ context the size of a
+memory page on the machine, where the trace is generated.
+The _tep_ argument is trace event parser context.
+The _page_size_ argument is the size of a memory page, in bytes.
+
+RETURN VALUE
+------------
+The _tep_get_page_size()_ function returns size of the memory page, in bytes.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <unistd.h>
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+ int page_size = getpagesize();
+
+ tep_set_page_size(tep, page_size);
+
+ printf("The page size for this machine is %d\n", tep_get_page_size(tep));
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt
new file mode 100644
index 000000000000..f248114ca1ff
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt
@@ -0,0 +1,90 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_parse_event, tep_parse_format - Parse the event format information
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+--
+
+DESCRIPTION
+-----------
+The _tep_parse_event()_ function parses the event format and creates an event
+structure to quickly parse raw data for a given event. The _tep_ argument is
+the trace event parser context. The created event structure is stored in the
+_tep_ context. The _buf_ argument is a buffer, _size_ bytes long, holding the
+event format data. The event format data can be taken from
+tracefs/events/.../.../format files. The _sys_ argument is the system of
+the event.
+
+The _tep_parse_format()_ function does the same as _tep_parse_event()_. The only
+difference is in the extra _eventp_ argument, where the newly created event
+structure is returned.
+
+RETURN VALUE
+------------
+Both _tep_parse_event()_ and _tep_parse_format()_ functions return 0 on success,
+or TEP_ERRNO__... in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
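+/*
+ * read_file() is not part of libtraceevent. This is only a hypothetical
+ * helper, sketched here so the example is self-contained: it reads the whole
+ * file into a newly allocated, zero terminated buffer and stores the number
+ * of bytes read in *size. It assumes <fcntl.h>, <unistd.h>, <stdlib.h> and
+ * <stdio.h> are included.
+ */
+static char *read_file(const char *file, int *size)
+{
+	char *buf = NULL, *nbuf;
+	int fd, r, len = 0;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	do {
+		/* Grow the buffer by one chunk and read into the new space */
+		nbuf = realloc(buf, len + BUFSIZ + 1);
+		if (!nbuf) {
+			free(buf);
+			close(fd);
+			return NULL;
+		}
+		buf = nbuf;
+		r = read(fd, buf + len, BUFSIZ);
+		if (r > 0)
+			len += r;
+	} while (r > 0);
+	close(fd);
+
+	if (r < 0) {
+		free(buf);
+		return NULL;
+	}
+	buf[len] = '\0';
+	*size = len;
+	return buf;
+}
+...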
+char *buf;
+int size;
+struct tep_event *event = NULL;
+buf = read_file("/sys/kernel/tracing/events/ftrace/print/format", &size);
+if (tep_parse_event(tep, buf, size, "ftrace") != 0) {
+ /* Failed to parse the ftrace print format */
+}
+
+if (tep_parse_format(tep, &event, buf, size, "ftrace") != 0) {
+ /* Failed to parse the ftrace print format */
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt
new file mode 100644
index 000000000000..c90f16c7d8e6
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt
@@ -0,0 +1,82 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_parse_header_page - Parse the data stored in the header page.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
+--
+
+DESCRIPTION
+-----------
+The _tep_parse_header_page()_ function parses the header page data from _buf_,
+and initializes the _tep_ trace event parser context with it. The buffer
+_buf_ is _size_ bytes long and is expected to be a copy of the
+tracefs/events/header_page file.
+
+Some old kernels do not have header page info. In this case the
+_tep_parse_header_page()_ function can be called with _size_ equal to 0, and
+the _tep_ context is initialized with default values. The _long_size_ argument
+can be used in this case to set the size of a long integer to be used.
+
+RETURN VALUE
+------------
+The _tep_parse_header_page()_ function returns 0 in case of success, or -1
+in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char *buf;
+int size;
+buf = read_file("/sys/kernel/tracing/events/header_page", &size);
+if (tep_parse_header_page(tep, buf, size, sizeof(unsigned long)) != 0) {
+ /* Failed to parse the header page */
+}
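+
+/*
+ * A sketch of the legacy case described above (an illustration, not part of
+ * the original example): when the kernel provides no header page data at all,
+ * the context can still be initialized with default values by passing a size
+ * of 0 and the long integer size to use. The return value of this legacy
+ * path is intentionally not checked here.
+ */
+if (!buf)
+	tep_parse_header_page(tep, NULL, 0, sizeof(long));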
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt b/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt
new file mode 100644
index 000000000000..e9a69116c78b
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt
@@ -0,0 +1,137 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_data_type, tep_data_pid, tep_data_preempt_count, tep_data_flags -
+Extract common fields from a record.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *trace_flag_type* {
+ _TRACE_FLAG_IRQS_OFF_,
+ _TRACE_FLAG_IRQS_NOSUPPORT_,
+ _TRACE_FLAG_NEED_RESCHED_,
+ _TRACE_FLAG_HARDIRQ_,
+ _TRACE_FLAG_SOFTIRQ_,
+};
+
+int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+--
+
+DESCRIPTION
+-----------
+This set of functions can be used to extract common fields from a record.
+
+The _tep_data_type()_ function gets the event id from the record _rec_.
+It reads the "common_type" field. The _tep_ argument is the trace event parser
+context.
+
+The _tep_data_pid()_ function gets the process id from the record _rec_.
+It reads the "common_pid" field. The _tep_ argument is the trace event parser
+context.
+
+The _tep_data_preempt_count()_ function gets the preemption count from the
+record _rec_. It reads the "common_preempt_count" field. The _tep_ argument is
+the trace event parser context.
+
+The _tep_data_flags()_ function gets the latency flags from the record _rec_.
+It reads the "common_flags" field. The _tep_ argument is the trace event parser
+context. Supported latency flags are:
+[verse]
+--
+ _TRACE_FLAG_IRQS_OFF_, Interrupts are disabled.
+ _TRACE_FLAG_IRQS_NOSUPPORT_, Reading IRQ flag is not supported by the architecture.
+ _TRACE_FLAG_NEED_RESCHED_, Task needs rescheduling.
+ _TRACE_FLAG_HARDIRQ_, Hard IRQ is running.
+ _TRACE_FLAG_SOFTIRQ_, Soft IRQ is running.
+--
+
+RETURN VALUE
+------------
+The _tep_data_type()_ function returns an integer representing the event id.
+
+The _tep_data_pid()_ function returns an integer representing the process id.
+
+The _tep_data_preempt_count()_ function returns an integer representing the
+preemption count.
+
+The _tep_data_flags()_ function returns an integer representing the latency
+flags. Look at the _trace_flag_type_ enum for supported flags.
+
+In case of an error, all these functions return a negative integer.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+void process_record(struct tep_record *record)
+{
+ int data;
+
+ data = tep_data_type(tep, record);
+ if (data >= 0) {
+ /* Got the ID of the event */
+ }
+
+ data = tep_data_pid(tep, record);
+ if (data >= 0) {
+ /* Got the process ID */
+ }
+
+ data = tep_data_preempt_count(tep, record);
+ if (data >= 0) {
+ /* Got the preemption count */
+ }
+
+ data = tep_data_flags(tep, record);
+ if (data >= 0) {
+ /* Got the latency flags */
+ }
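+
+	/*
+	 * Individual latency flags can be tested with a bitwise AND. This is
+	 * only an illustration; it assumes the trace_flag_type values are bit
+	 * masks, as declared in event-parse.h.
+	 */
+	if (data >= 0 && (data & TRACE_FLAG_HARDIRQ)) {
+		/* The record was generated in hard IRQ context */
+	}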
+}
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt
new file mode 100644
index 000000000000..53d37d72a1c1
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt
@@ -0,0 +1,156 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_event_handler, tep_unregister_event_handler - Register /
+unregister a callback function to parse event information.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_reg_handler* {
+ _TEP_REGISTER_SUCCESS_,
+ _TEP_REGISTER_SUCCESS_OVERWRITE_,
+};
+
+int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+int *tep_unregister_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+
+typedef int (*pass:[*]tep_event_handler_func*)(struct trace_seq pass:[*]s, struct tep_record pass:[*]record, struct tep_event pass:[*]event, void pass:[*]context);
+--
+
+DESCRIPTION
+-----------
+The _tep_register_event_handler()_ function registers a handler function,
+which is going to be called to parse the information for a given event.
+The _tep_ argument is the trace event parser context. The _id_ argument is
+the id of the event. The _sys_name_ argument is the name of the system
+the event belongs to. The _event_name_ argument is the name of the event.
+If _id_ is >= 0, it is used to find the event, otherwise _sys_name_ and
+_event_name_ are used. The _func_ argument is a pointer to the function which
+is going to be called to parse the event information. The _context_ argument is a pointer
+to the context data, which will be passed to the _func_. If a handler function
+for the same event is already registered, it will be overridden with the new
+one. This mechanism allows a developer to override the parsing of a given event.
+If for some reason the default print format is not sufficient, the developer
+can register a function for an event to be used to parse the data instead.
+
+The _tep_unregister_event_handler()_ function unregisters the handler function,
+previously registered with _tep_register_event_handler()_. The _tep_ argument
+is the trace event parser context. The _id_, _sys_name_, _event_name_, _func_,
+and _context_ are the same arguments, as when the callback function _func_ was
+registered.
+
+The _tep_event_handler_func_ is the type of the custom event handler
+function. The _s_ argument is the trace sequence; it can be used to create a
+custom string describing the event. The _record_ argument is the record to get
+the event from, and _event_ is the handle to the record's event. The
+_context_ is the custom context, set when the custom event handler was registered.
+
+RETURN VALUE
+------------
+The _tep_register_event_handler()_ function returns _TEP_REGISTER_SUCCESS_
+if the new handler is registered successfully or
+_TEP_REGISTER_SUCCESS_OVERWRITE_ if an existing handler is overwritten.
+If there is not enough memory to complete the registration,
+TEP_ERRNO__MEM_ALLOC_FAILED is returned.
+
+The _tep_unregister_event_handler()_ function returns 0 if _func_ was removed
+successfully, or -1 if the event was not found.
+
+The _tep_event_handler_func_ should return -1 in case of an error,
+or 0 otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+int timer_expire_handler(struct trace_seq *s, struct tep_record *record,
+ struct tep_event *event, void *context)
+{
+ trace_seq_printf(s, "hrtimer=");
+
+ if (tep_print_num_field(s, "0x%llx", event, "timer", record, 0) == -1)
+ tep_print_num_field(s, "0x%llx", event, "hrtimer", record, 1);
+
+ trace_seq_printf(s, " now=");
+
+ tep_print_num_field(s, "%llu", event, "now", record, 1);
+
+ tep_print_func_field(s, " function=%s", event, "function", record, 0);
+
+ return 0;
+}
+...
+ int ret;
+
+ ret = tep_register_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
+ timer_expire_handler, NULL);
+ if (ret < 0) {
+ char buf[32];
+
+		tep_strerror(tep, ret, buf, 32);
+ printf("Failed to register handler for hrtimer_expire_entry: %s\n", buf);
+ } else {
+ switch (ret) {
+ case TEP_REGISTER_SUCCESS:
+ printf ("Registered handler for hrtimer_expire_entry\n");
+ break;
+ case TEP_REGISTER_SUCCESS_OVERWRITE:
+ printf ("Overwrote handler for hrtimer_expire_entry\n");
+ break;
+ }
+ }
+...
+ ret = tep_unregister_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
+ timer_expire_handler, NULL);
+	if (ret)
+ printf ("Failed to unregister handler for hrtimer_expire_entry\n");
+
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences
+ related APIs. Trace sequences are used to allow a function to call
+ several other functions to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt
new file mode 100644
index 000000000000..708dce91ebd8
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt
@@ -0,0 +1,155 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_register_print_function, tep_unregister_print_function -
+Register / unregister a helper function.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_func_arg_type* {
+ TEP_FUNC_ARG_VOID,
+ TEP_FUNC_ARG_INT,
+ TEP_FUNC_ARG_LONG,
+ TEP_FUNC_ARG_STRING,
+ TEP_FUNC_ARG_PTR,
+ TEP_FUNC_ARG_MAX_TYPES
+};
+
+typedef unsigned long long (*pass:[*]tep_func_handler*)(struct trace_seq pass:[*]s, unsigned long long pass:[*]args);
+
+int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
+int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
+--
+
+DESCRIPTION
+-----------
+Some events may have helper functions in the print format arguments.
+This allows a plugin to dynamically create a way to process one of
+these functions.
+
+The _tep_register_print_function()_ function registers such a helper function.
+The _tep_ argument is the trace event parser context. The _func_ argument is a
+pointer to the helper function. The _ret_type_ argument is the return type of
+the helper function, a value from the _tep_func_arg_type_ enum. The _name_ is
+the name of the helper function, as seen in the print format arguments. The
+_..._ is a variable list of _tep_func_arg_type_ enums, describing the _func_
+function arguments. This list must end with _TEP_FUNC_ARG_VOID_. See the
+'EXAMPLE' section.
+
+The _tep_unregister_print_function()_ unregisters a helper function, previously
+registered with _tep_register_print_function()_. The _tep_ argument is the
+trace event parser context. The _func_ and _name_ arguments are the same as
+those used when the helper function was registered.
+
+The _tep_func_handler_ is the type of the helper function. The _s_ argument is
+the trace sequence; it can be used to create a custom string.
+The _args_ is a list of arguments, defined when the helper function was
+registered.
+
+RETURN VALUE
+------------
+The _tep_register_print_function()_ function returns 0 in case of success.
+In case of an error, TEP_ERRNO_... code is returned.
+
+The _tep_unregister_print_function()_ returns 0 in case of success, or -1 in
+case of an error.
+
+EXAMPLE
+-------
+Some events have internal function calls that appear in the print format
+output. For example "tracefs/events/i915/g4x_wm/format" has:
+[source,c]
+--
+print fmt: "pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
+ ((REC->pipe) + 'A'), REC->frame, REC->scanline, REC->primary,
+ REC->sprite, REC->cursor, yesno(REC->cxsr), REC->sr_plane,
+ REC->sr_cursor, REC->sr_fbc, yesno(REC->hpll), REC->hpll_plane,
+ REC->hpll_cursor, REC->hpll_fbc, yesno(REC->fbc)
+--
+Notice the call to function _yesno()_ in the print arguments. In the kernel
+context, this function has the following implementation:
+[source,c]
+--
+static const char *yesno(int x)
+{
+ static const char *yes = "yes";
+ static const char *no = "no";
+
+ return x ? yes : no;
+}
+--
+The user space event parser has no idea how to handle this _yesno()_ function.
+The _tep_register_print_function()_ API can be used to register a user space
+helper function, mapped to the kernel's _yesno()_:
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+static const char *yes_no_helper(int x)
+{
+ return x ? "yes" : "no";
+}
+...
+ if ( tep_register_print_function(tep,
+ yes_no_helper,
+ TEP_FUNC_ARG_STRING,
+ "yesno",
+ TEP_FUNC_ARG_INT,
+ TEP_FUNC_ARG_VOID) != 0) {
+ /* Failed to register yes_no_helper function */
+ }
+
+/*
+ Now, when the event parser encounters this yesno() function, it will know
+ how to handle it.
+*/
+...
+ if (tep_unregister_print_function(tep, yes_no_helper, "yesno") != 0) {
+ /* Failed to unregister yes_no_helper function */
+ }
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences
+ related APIs. Trace sequences are used to allow a function to call
+ several other functions to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt b/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt
new file mode 100644
index 000000000000..b0599780b9a6
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt
@@ -0,0 +1,104 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_set_flag, tep_clear_flag, tep_test_flag -
+Manage the flags of a trace event parser context.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+enum *tep_flag* {
+ _TEP_NSEC_OUTPUT_,
+ _TEP_DISABLE_SYS_PLUGINS_,
+ _TEP_DISABLE_PLUGINS_
+};
+void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+--
+
+DESCRIPTION
+-----------
+Trace event parser context flags are defined in *enum tep_flag*:
+[verse]
+--
+_TEP_NSEC_OUTPUT_ - print the event's timestamp in nanoseconds instead of microseconds.
+_TEP_DISABLE_SYS_PLUGINS_ - disable the plugins located in the system's plugin
+			directory. This directory is defined at library compile
+			time, and usually depends on the library installation
+			prefix: (install_prefix)/lib/traceevent/plugins
+_TEP_DISABLE_PLUGINS_ - disable all library plugins:
+			- in the system's plugin directory
+			- in the directory defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
+			- in the user's home directory, _~/.traceevent/plugins_
+--
+Note: plugin related flags must be set before calling the _tep_load_plugins()_ API.
+
+The _tep_set_flag()_ function sets _flag_ in the _tep_ context.
+
+The _tep_clear_flag()_ function clears _flag_ from the _tep_ context.
+
+The _tep_test_flag()_ function tests if _flag_ is set in the _tep_ context.
+
+RETURN VALUE
+------------
+The _tep_test_flag()_ function returns true if _flag_ is set, false otherwise.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+/* Print timestamps in nanoseconds */
+tep_set_flag(tep, TEP_NSEC_OUTPUT);
+...
+if (tep_test_flag(tep, TEP_NSEC_OUTPUT)) {
+ /* print timestamps in nanoseconds */
+} else {
+ /* print timestamps in microseconds */
+}
+...
+/* Print timestamps in microseconds */
+tep_clear_flag(tep, TEP_NSEC_OUTPUT);
+...
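+/*
+ * A minimal sketch of the plugin related flags (see the note above): they
+ * have to be set before tep_load_plugins() is called. tep_load_plugins() is
+ * part of the library and is documented separately.
+ */
+tep_set_flag(tep, TEP_DISABLE_SYS_PLUGINS);
+struct tep_plugin_list *plugins = tep_load_plugins(tep);
+...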
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt b/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt
new file mode 100644
index 000000000000..ee4062a00c9f
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt
@@ -0,0 +1,85 @@
+libtraceevent(3)
+================
+
+NAME
+----
+tep_strerror - Return a string describing a regular errno or a tep error number.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
+
+--
+
+DESCRIPTION
+-----------
+The _tep_strerror()_ function converts a tep error number into a human
+readable string.
+The _tep_ argument is the trace event parser context. The _errnum_ is a regular
+errno, defined in errno.h, or a tep error number. The string describing this
+error number is copied into the _buf_ argument. The _buflen_ argument is
+the size of _buf_.
+
+It is a thread safe wrapper around strerror_r(). That library function has two
+different behaviors - POSIX and GNU specific. The _tep_strerror()_ API always
+behaves as the POSIX version - the error string is copied into the user
+supplied buffer.
+
+RETURN VALUE
+------------
+The _tep_strerror()_ function returns 0 if a valid _errnum_ is passed and the
+string is copied into _buf_. If _errnum_ is not a valid error number,
+-1 is returned and _buf_ is not modified.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+...
+struct tep_handle *tep = tep_alloc();
+...
+char buf[32];
+char *pool = calloc(1, 128);
+if (pool == NULL) {
+ tep_strerror(tep, TEP_ERRNO__MEM_ALLOC_FAILED, buf, 32);
+ printf ("The pool is not initialized, %s", buf);
+}
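+
+/*
+ * tep_strerror() also accepts regular errno values, as described above.
+ * This is only an illustration and assumes <errno.h> is included.
+ */
+if (tep_strerror(tep, EINVAL, buf, 32) == 0)
+	printf("%s\n", buf);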
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt b/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt
new file mode 100644
index 000000000000..8ac6aa174e12
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt
@@ -0,0 +1,158 @@
+libtraceevent(3)
+================
+
+NAME
+----
+trace_seq_init, trace_seq_destroy, trace_seq_reset, trace_seq_terminate,
+trace_seq_putc, trace_seq_puts, trace_seq_printf, trace_seq_vprintf,
+trace_seq_do_fprintf, trace_seq_do_printf -
+Initialize / destroy a trace sequence, and write data to it.
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+*#include <trace-seq.h>*
+
+void *trace_seq_init*(struct trace_seq pass:[*]_s_);
+void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
+void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
+void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
+int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
+int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
+int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, _..._);
+int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
+int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
+int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
+--
+
+DESCRIPTION
+-----------
+Trace sequences are used to allow a function to call several other functions
+to create a string of data to use.
+
+The _trace_seq_init()_ function initializes the trace sequence _s_.
+
+The _trace_seq_destroy()_ function destroys the trace sequence _s_ and frees
+all the resources it has used.
+
+The _trace_seq_reset()_ function re-initializes the trace sequence _s_. All
+characters already written in _s_ will be deleted.
+
+The _trace_seq_terminate()_ function terminates the trace sequence _s_. It puts
+the null character pass:['\0'] at the end of the buffer.
+
+The _trace_seq_putc()_ function puts a single character _c_ in the trace
+sequence _s_.
+
+The _trace_seq_puts()_ function puts a NULL terminated string _str_ in the
+trace sequence _s_.
+
+The _trace_seq_printf()_ function puts a formatted string _fmt_ with
+variable arguments _..._ in the trace sequence _s_.
+
+The _trace_seq_vprintf()_ function puts a formatted string _fmt_ with
+a list of arguments _args_ in the trace sequence _s_.
+
+The _trace_seq_do_printf()_ function prints the buffer of trace sequence _s_ to
+the standard output stdout.
+
+The _trace_seq_do_fprintf()_ function prints the buffer of trace sequence _s_
+to the given file _fp_.
+
+RETURN VALUE
+------------
+Both _trace_seq_putc()_ and _trace_seq_puts()_ functions return the number of
+characters put in the trace sequence, or 0 in case of an error.
+
+Both _trace_seq_printf()_ and _trace_seq_vprintf()_ functions return the number
+of characters printed, 0 if the output exceeds the buffer's free space, or
+a negative value in case of an error.
+
+Both _trace_seq_do_printf()_ and _trace_seq_do_fprintf()_ functions return the
+number of printed characters, or -1 in case of an error.
+
+EXAMPLE
+-------
+[source,c]
+--
+#include <event-parse.h>
+#include <trace-seq.h>
+...
+struct trace_seq seq;
+trace_seq_init(&seq);
+...
+void foo_seq_print(struct trace_seq *tseq, char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ if (trace_seq_vprintf(tseq, format, ap) <= 0) {
+ /* Failed to print in the trace sequence */
+ }
+ va_end(ap);
+}
+
+trace_seq_reset(&seq);
+
+char *str = " MAN page example";
+if (trace_seq_puts(&seq, str) != strlen(str)) {
+ /* Failed to put str in the trace sequence */
+}
+if (trace_seq_putc(&seq, ':') != 1) {
+ /* Failed to put ':' in the trace sequence */
+}
+if (trace_seq_printf(&seq, " trace sequence: %d", 1) <= 0) {
+ /* Failed to print in the trace sequence */
+}
+foo_seq_print(&seq, " %d\n", 2);
+
+trace_seq_terminate(&seq);
+...
+
+if (trace_seq_do_printf(&seq) < 0) {
+ /* Failed to print the sequence buffer to the standard output */
+}
+FILE *fp = fopen("trace.txt", "w");
+if (trace_seq_do_fprintf(&seq, fp) < 0) {
+ /* Failed to print the sequence buffer to the trace.txt file */
+}
+
+trace_seq_destroy(&seq);
+...
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences related APIs.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_libtraceevent(3)_, _trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent.txt b/tools/lib/traceevent/Documentation/libtraceevent.txt
new file mode 100644
index 000000000000..fbd977b47de1
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/libtraceevent.txt
@@ -0,0 +1,203 @@
+libtraceevent(3)
+================
+
+NAME
+----
+libtraceevent - Linux kernel trace event library
+
+SYNOPSIS
+--------
+[verse]
+--
+*#include <event-parse.h>*
+
+Management of tep handler data structure and access of its members:
+ struct tep_handle pass:[*]*tep_alloc*(void);
+ void *tep_free*(struct tep_handle pass:[*]_tep_);
+ void *tep_ref*(struct tep_handle pass:[*]_tep_);
+ void *tep_unref*(struct tep_handle pass:[*]_tep_);
+ int *tep_ref_get*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+ void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
+ bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_);
+ int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
+ int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
+ int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
+ bool *tep_is_latency_format*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_latency_format*(struct tep_handle pass:[*]_tep_, int _lat_);
+ int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
+ int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
+ bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
+ int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
+
+Register / unregister APIs:
+ int *tep_register_trace_clock*(struct tep_handle pass:[*]_tep_, const char pass:[*]_trace_clock_);
+ int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
+ int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+ int *tep_unregister_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
+ int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
+ int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
+ int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
+
+Plugins management:
+ struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
+ void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
+ char pass:[*]pass:[*]*tep_plugin_list_options*(void);
+ void *tep_plugin_free_options_list*(char pass:[*]pass:[*]_list_);
+ int *tep_plugin_add_options*(const char pass:[*]_name_, struct tep_plugin_option pass:[*]_options_);
+ void *tep_plugin_remove_options*(struct tep_plugin_option pass:[*]_options_);
+ void *tep_print_plugins*(struct trace_seq pass:[*]_s_, const char pass:[*]_prefix_, const char pass:[*]_suffix_, const struct tep_plugin_list pass:[*]_list_);
+
+Event related APIs:
+ struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
+ struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
+ int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
+ struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+ struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
+
+Event printing:
+ void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, bool _use_trace_clock_);
+ void *tep_print_event_data*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+ void *tep_event_info*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+ void *tep_print_event_task*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_);
+ void *tep_print_event_time*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_, bool _use_trace_clock_);
+ void *tep_set_print_raw*(struct tep_handle pass:[*]_tep_, int _print_raw_);
+
+Event finding:
+ struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
+ struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
+ struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
+
+Parsing of event files:
+ int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
+ enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+ enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
+
+APIs related to fields from event's format files:
+ struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
+ struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
+ void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
+ int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+ int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+ int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
+ int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
+
+Event fields printing:
+ void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
+ void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
+ int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+ int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
+
+Event fields finding:
+ struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+ struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+ struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
+
+Functions resolver:
+ int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
+ void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
+ const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+ unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
+
+Filter management:
+ struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
+ enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
+ enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
+ int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]_buf_, size_t _buflen_);
+ int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+ void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
+ void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
+ char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+ int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
+ int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
+ int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
+
+Parsing various data from the records:
+ void *tep_data_latency_format*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_);
+ int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+ int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+ int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+ int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+
+Command and task related APIs:
+ const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_tep_, int _pid_);
+ struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
+ int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+ int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
+ bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
+ int *tep_cmdline_pid*(struct tep_handle pass:[*]_tep_, struct cmdline pass:[*]_cmdline_);
+
+Endian related APIs:
+ int *tep_is_bigendian*(void);
+ unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
+ bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+ bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
+ void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
+
+Trace sequences:
+*#include <trace-seq.h>*
+ void *trace_seq_init*(struct trace_seq pass:[*]_s_);
+ void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
+ void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
+ int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, ...);
+ int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
+ int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
+ int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
+ void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
+ int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
+ int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
+--
+
+DESCRIPTION
+-----------
+The libtraceevent(3) library provides APIs to access kernel tracepoint events,
+located in the tracefs file system under the events directory.
+
+ENVIRONMENT
+-----------
+[verse]
+--
+TRACEEVENT_PLUGIN_DIR
+	Additional plugin directory. All shared object files located in this directory will be loaded as traceevent plugins.
+--
+
+FILES
+-----
+[verse]
+--
+*event-parse.h*
+ Header file to include in order to have access to the library APIs.
+*trace-seq.h*
+ Header file to include in order to have access to trace sequences related APIs.
+ Trace sequences are used to allow a function to call several other functions
+ to create a string of data to use.
+*-ltraceevent*
+ Linker switch to add when building a program that uses the library.
+--
+
+SEE ALSO
+--------
+_trace-cmd(1)_
+
+AUTHOR
+------
+[verse]
+--
+*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
+*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
+--
+REPORTING BUGS
+--------------
+Report bugs to <linux-trace-devel@vger.kernel.org>
+
+LICENSE
+-------
+libtraceevent is Free Software licensed under the GNU LGPL 2.1
+
+RESOURCES
+---------
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/manpage-1.72.xsl b/tools/lib/traceevent/Documentation/manpage-1.72.xsl
new file mode 100644
index 000000000000..b4d315cb8c47
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/manpage-1.72.xsl
@@ -0,0 +1,14 @@
+<!-- manpage-1.72.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles peculiarities in docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the special values for the roff control characters
+ needed for docbook-xsl 1.72.0 -->
+<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
+<xsl:param name="git.docbook.dot" >&#x2302;</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-base.xsl b/tools/lib/traceevent/Documentation/manpage-base.xsl
new file mode 100644
index 000000000000..a264fa616093
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/manpage-base.xsl
@@ -0,0 +1,35 @@
+<!-- manpage-base.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- these params silence some output from xmlto -->
+<xsl:param name="man.output.quietly" select="1"/>
+<xsl:param name="refentry.meta.get.quietly" select="1"/>
+
+<!-- convert asciidoc callouts to man page format;
+ git.docbook.backslash and git.docbook.dot params
+ must be supplied by another XSL file or other means -->
+<xsl:template match="co">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB(',
+ substring-after(@id,'-'),')',
+ $git.docbook.backslash,'fR')"/>
+</xsl:template>
+<xsl:template match="calloutlist">
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>sp&#10;</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:text>&#10;</xsl:text>
+</xsl:template>
+<xsl:template match="callout">
+ <xsl:value-of select="concat(
+ $git.docbook.backslash,'fB',
+ substring-after(@arearefs,'-'),
+ '. ',$git.docbook.backslash,'fR')"/>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.dot"/>
+ <xsl:text>br&#10;</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl b/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl
new file mode 100644
index 000000000000..608eb5df6281
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl
@@ -0,0 +1,17 @@
+<!-- manpage-bold-literal.xsl:
+ special formatting for manpages rendered from asciidoc+docbook -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- render literal text as bold (instead of plain or monospace);
+ this makes literal text easier to distinguish in manpages
+ viewed on a tty -->
+<xsl:template match="literal">
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fB</xsl:text>
+ <xsl:apply-templates/>
+ <xsl:value-of select="$git.docbook.backslash"/>
+ <xsl:text>fR</xsl:text>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-normal.xsl b/tools/lib/traceevent/Documentation/manpage-normal.xsl
new file mode 100644
index 000000000000..a48f5b11f3dc
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/manpage-normal.xsl
@@ -0,0 +1,13 @@
+<!-- manpage-normal.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles anything we want to keep away from docbook-xsl 1.72.0 -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<xsl:import href="manpage-base.xsl"/>
+
+<!-- these are the normal values for the roff control characters -->
+<xsl:param name="git.docbook.backslash">\</xsl:param>
+<xsl:param name="git.docbook.dot" >.</xsl:param>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl b/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl
new file mode 100644
index 000000000000..a63c7632a87d
--- /dev/null
+++ b/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl
@@ -0,0 +1,21 @@
+<!-- manpage-suppress-sp.xsl:
+ special settings for manpages rendered from asciidoc+docbook
+ handles erroneous, inline .sp in manpage output of some
+ versions of docbook-xsl -->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0">
+
+<!-- attempt to work around spurious .sp at the tail of the line
+ that some versions of docbook stylesheets seem to add -->
+<xsl:template match="simpara">
+ <xsl:variable name="content">
+ <xsl:apply-templates/>
+ </xsl:variable>
+ <xsl:value-of select="normalize-space($content)"/>
+ <xsl:if test="not(ancestor::authorblurb) and
+ not(ancestor::personblurb)">
+ <xsl:text>&#10;&#10;</xsl:text>
+ </xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 941761d9923d..3292c290654f 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -50,9 +50,13 @@ man_dir = $(prefix)/share/man
man_dir_SQ = '$(subst ','\'',$(man_dir))'
pkgconfig_dir ?= $(word 1,$(shell $(PKG_CONFIG) \
--variable pc_path pkg-config | tr ":" " "))
+includedir_relative = traceevent
+includedir = $(prefix)/include/$(includedir_relative)
+includedir_SQ = '$(subst ','\'',$(includedir))'
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
+export EVENT_PARSE_VERSION
set_plugin_dir := 1
@@ -279,6 +283,8 @@ define do_install_pkgconfig_file
cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE}; \
sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \
sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
+ sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \
+ sed -i "s|HEADER_DIR|$(includedir)|g" ${PKG_CONFIG_FILE}; \
$(call do_install,$(PKG_CONFIG_FILE),$(pkgconfig_dir),644); \
else \
(echo Failed to locate pkg-config directory) 1>&2; \
@@ -300,10 +306,10 @@ install_pkgconfig:
install_headers:
$(call QUIET_INSTALL, headers) \
- $(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \
- $(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \
- $(call do_install,trace-seq.h,$(prefix)/include/traceevent,644); \
- $(call do_install,kbuffer.h,$(prefix)/include/traceevent,644)
+ $(call do_install,event-parse.h,$(DESTDIR)$(includedir_SQ),644); \
+ $(call do_install,event-utils.h,$(DESTDIR)$(includedir_SQ),644); \
+ $(call do_install,trace-seq.h,$(DESTDIR)$(includedir_SQ),644); \
+ $(call do_install,kbuffer.h,$(DESTDIR)$(includedir_SQ),644)
install: install_lib
@@ -313,6 +319,38 @@ clean:
$(RM) TRACEEVENT-CFLAGS tags TAGS; \
$(RM) $(PKG_CONFIG_FILE)
+PHONY += doc
+doc:
+ $(call descend,Documentation)
+
+PHONY += doc-clean
+doc-clean:
+ $(call descend,Documentation,clean)
+
+PHONY += doc-install
+doc-install:
+ $(call descend,Documentation,install)
+
+PHONY += doc-uninstall
+doc-uninstall:
+ $(call descend,Documentation,uninstall)
+
+PHONY += help
+help:
+ @echo 'Possible targets:'
+ @echo''
+ @echo ' all - default, compile the library and the'\
+ 'plugins'
+ @echo ' plugins - compile the plugins'
+ @echo ' install - install the library, the plugins,'\
+ 'the header and pkgconfig files'
+ @echo ' clean - clean the library and the plugins object files'
+ @echo ' doc - compile the documentation files - man'\
+ 'and html pages, in the Documentation directory'
+ @echo ' doc-clean - clean the documentation files'
+ @echo ' doc-install - install the man pages'
+ @echo ' doc-uninstall - uninstall the man pages'
+ @echo''
PHONY += force plugins
force:
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index d463761a58f4..988587840c80 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -9,6 +9,22 @@
#include "event-utils.h"
/**
+ * tep_get_event - returns the event with the given index
+ * @tep: a handle to the tep_handle
+ * @index: index of the requested event, in the range 0 .. nr_events - 1
+ *
+ * This returns a pointer to the element of the events array with the given index.
+ * If @tep is NULL, or @index is out of range, NULL is returned.
+ */
+struct tep_event *tep_get_event(struct tep_handle *tep, int index)
+{
+ if (tep && tep->events && index < tep->nr_events)
+ return tep->events[index];
+
+ return NULL;
+}
+
+/**
* tep_get_first_event - returns the first event in the events array
* @tep: a handle to the tep_handle
*
@@ -17,10 +33,7 @@
*/
struct tep_event *tep_get_first_event(struct tep_handle *tep)
{
- if (tep && tep->events)
- return tep->events[0];
-
- return NULL;
+ return tep_get_event(tep, 0);
}
/**
@@ -32,7 +45,7 @@ struct tep_event *tep_get_first_event(struct tep_handle *tep)
*/
int tep_get_events_count(struct tep_handle *tep)
{
- if(tep)
+ if (tep)
return tep->nr_events;
return 0;
}
@@ -43,19 +56,47 @@ int tep_get_events_count(struct tep_handle *tep)
* @flag: flag, or combination of flags to be set
* can be any combination from enum tep_flag
*
- * This sets a flag or mbination of flags from enum tep_flag
- */
+ * This sets a flag or combination of flags from enum tep_flag
+ */
void tep_set_flag(struct tep_handle *tep, int flag)
{
- if(tep)
+ if (tep)
tep->flags |= flag;
}
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
+/**
+ * tep_clear_flag - clear event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be cleared
+ *
+ * This clears a tep flag
+ */
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+ if (tep)
+ tep->flags &= ~flag;
+}
+
+/**
+ * tep_test_flag - check the state of event parser flag
+ * @tep: a handle to the tep_handle
+ * @flag: flag to be checked
+ *
+ * This returns the state of the requested tep flag.
+ * Returns: true if the flag is set, false otherwise.
+ */
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flag)
+{
+ if (tep)
+ return tep->flags & flag;
+ return false;
+}
+
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data)
{
unsigned short swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 8) |
@@ -64,11 +105,11 @@ unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
return swap;
}
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data)
{
unsigned int swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 24) |
@@ -80,11 +121,11 @@ unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
}
unsigned long long
-tep_data2host8(struct tep_handle *pevent, unsigned long long data)
+tep_data2host8(struct tep_handle *tep, unsigned long long data)
{
unsigned long long swap;
- if (!pevent || pevent->host_bigendian == pevent->file_bigendian)
+ if (!tep || tep->host_bigendian == tep->file_bigendian)
return data;
swap = ((data & 0xffULL) << 56) |
@@ -101,175 +142,232 @@ tep_data2host8(struct tep_handle *pevent, unsigned long long data)
/**
* tep_get_header_page_size - get size of the header page
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This returns size of the header page
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
+ */
+int tep_get_header_page_size(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->header_page_size_size;
+ return 0;
+}
+
+/**
+ * tep_get_header_timestamp_size - get size of the timestamp in the header page
+ * @tep: a handle to the tep_handle
+ *
+ * This returns the size of the timestamp in the header page
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_header_page_size(struct tep_handle *pevent)
+int tep_get_header_timestamp_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->header_page_size_size;
+ if (tep)
+ return tep->header_page_ts_size;
return 0;
}
/**
* tep_get_cpus - get the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This returns the number of CPUs
- * If @pevent is NULL, 0 is returned.
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_cpus(struct tep_handle *pevent)
+int tep_get_cpus(struct tep_handle *tep)
{
- if(pevent)
- return pevent->cpus;
+ if (tep)
+ return tep->cpus;
return 0;
}
/**
* tep_set_cpus - set the number of CPUs
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
* This sets the number of CPUs
*/
-void tep_set_cpus(struct tep_handle *pevent, int cpus)
+void tep_set_cpus(struct tep_handle *tep, int cpus)
{
- if(pevent)
- pevent->cpus = cpus;
+ if (tep)
+ tep->cpus = cpus;
}
/**
- * tep_get_long_size - get the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_long_size - get the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
*
- * This returns the size of a long integer on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a long integer on the traced machine
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_long_size(struct tep_handle *pevent)
+int tep_get_long_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->long_size;
+ if (tep)
+ return tep->long_size;
return 0;
}
/**
- * tep_set_long_size - set the size of a long integer on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_long_size - set the size of a long integer on the traced machine
+ * @tep: a handle to the tep_handle
* @size: size, in bytes, of a long integer
*
- * This sets the size of a long integer on the current machine
+ * This sets the size of a long integer on the traced machine
*/
-void tep_set_long_size(struct tep_handle *pevent, int long_size)
+void tep_set_long_size(struct tep_handle *tep, int long_size)
{
- if(pevent)
- pevent->long_size = long_size;
+ if (tep)
+ tep->long_size = long_size;
}
/**
- * tep_get_page_size - get the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_get_page_size - get the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
*
- * This returns the size of a memory page on the current machine
- * If @pevent is NULL, 0 is returned.
+ * This returns the size of a memory page on the traced machine
+ * If @tep is NULL, 0 is returned.
*/
-int tep_get_page_size(struct tep_handle *pevent)
+int tep_get_page_size(struct tep_handle *tep)
{
- if(pevent)
- return pevent->page_size;
+ if (tep)
+ return tep->page_size;
return 0;
}
/**
- * tep_set_page_size - set the size of a memory page on the current machine
- * @pevent: a handle to the tep_handle
+ * tep_set_page_size - set the size of a memory page on the traced machine
+ * @tep: a handle to the tep_handle
* @_page_size: size of a memory page, in bytes
*
- * This sets the size of a memory page on the current machine
+ * This sets the size of a memory page on the traced machine
*/
-void tep_set_page_size(struct tep_handle *pevent, int _page_size)
+void tep_set_page_size(struct tep_handle *tep, int _page_size)
{
- if(pevent)
- pevent->page_size = _page_size;
+ if (tep)
+ tep->page_size = _page_size;
}
/**
- * tep_file_bigendian - get if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * tep_is_file_bigendian - return the endian of the file
+ * @tep: a handle to the tep_handle
*
- * This returns if the file is in big endian order
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the file is in big endian order
+ * If @tep is NULL, false is returned.
*/
-int tep_file_bigendian(struct tep_handle *pevent)
+bool tep_is_file_bigendian(struct tep_handle *tep)
{
- if(pevent)
- return pevent->file_bigendian;
- return 0;
+ if (tep)
+ return (tep->file_bigendian == TEP_BIG_ENDIAN);
+ return false;
}
/**
* tep_set_file_bigendian - set if the file is in big endian order
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
* @endian: non zero, if the file is in big endian order
*
* This sets if the file is in big endian order
*/
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian)
{
- if(pevent)
- pevent->file_bigendian = endian;
+ if (tep)
+ tep->file_bigendian = endian;
}
/**
- * tep_is_host_bigendian - get if the order of the current host is big endian
- * @pevent: a handle to the tep_handle
+ * tep_is_local_bigendian - return the endian of the saved local machine
+ * @tep: a handle to the tep_handle
*
- * This gets if the order of the current host is big endian
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the saved local machine in @tep is big endian.
+ * If @tep is NULL, false is returned.
*/
-int tep_is_host_bigendian(struct tep_handle *pevent)
+bool tep_is_local_bigendian(struct tep_handle *tep)
{
- if(pevent)
- return pevent->host_bigendian;
+ if (tep)
+ return (tep->host_bigendian == TEP_BIG_ENDIAN);
return 0;
}
/**
- * tep_set_host_bigendian - set the order of the local host
- * @pevent: a handle to the tep_handle
+ * tep_set_local_bigendian - set the stored local machine endian order
+ * @tep: a handle to the tep_handle
* @endian: non zero, if the local host has big endian order
*
- * This sets the order of the local host
+ * This sets the endian order for the local machine.
*/
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian)
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian)
{
- if(pevent)
- pevent->host_bigendian = endian;
+ if (tep)
+ tep->host_bigendian = endian;
}
/**
* tep_is_latency_format - get if the latency output format is configured
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
*
- * This gets if the latency output format is configured
- * If @pevent is NULL, 0 is returned.
+ * This returns true if the latency output format is configured
+ * If @tep is NULL, false is returned.
*/
-int tep_is_latency_format(struct tep_handle *pevent)
+bool tep_is_latency_format(struct tep_handle *tep)
{
- if(pevent)
- return pevent->latency_format;
- return 0;
+ if (tep)
+ return (tep->latency_format);
+ return false;
}
/**
* tep_set_latency_format - set the latency output format
- * @pevent: a handle to the tep_handle
+ * @tep: a handle to the tep_handle
* @lat: non zero for latency output format
*
* This sets the latency output format
*/
-void tep_set_latency_format(struct tep_handle *pevent, int lat)
+void tep_set_latency_format(struct tep_handle *tep, int lat)
+{
+ if (tep)
+ tep->latency_format = lat;
+}
+
+/**
+ * tep_is_old_format - get if an old kernel is used
+ * @tep: a handle to the tep_handle
+ *
+ * This returns true if an old kernel was used to generate the tracing events,
+ * or false if a new kernel was used. Old kernels did not have header page info.
+ * If @tep is NULL, false is returned.
+ */
+bool tep_is_old_format(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->old_format;
+ return false;
+}
+
+/**
+ * tep_set_print_raw - set a flag to force print in raw format
+ * @tep: a handle to the tep_handle
+ * @print_raw: the new value of the print_raw flag
+ *
+ * This sets a flag to force print in raw format
+ */
+void tep_set_print_raw(struct tep_handle *tep, int print_raw)
+{
+ if (tep)
+ tep->print_raw = print_raw;
+}
+
+/**
+ * tep_set_test_filters - set a flag to test a filter string
+ * @tep: a handle to the tep_handle
+ * @test_filters: the new value of the test_filters flag
+ *
+ * This sets a flag to test a filter string. If this flag is set, when the
+ * tep_filter_add_filter_str() API is called, it will print the filter string
+ * instead of adding it.
+ */
+void tep_set_test_filters(struct tep_handle *tep, int test_filters)
{
- if(pevent)
- pevent->latency_format = lat;
+ if (tep)
+ tep->test_filters = test_filters;
}
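
The accessors above are the public way to describe a trace file to the parser. A minimal usage sketch, assuming a handle obtained elsewhere (e.g. via tep_alloc(), not shown in this diff) and header values that are purely illustrative:

    #include <stdio.h>
    #include "event-parse.h"

    /* Illustrative only: feed trace-file header values into the parser. */
    static void describe_trace_file(struct tep_handle *tep)
    {
            tep_set_long_size(tep, 8);      /* sizeof(long) on the traced machine */
            tep_set_page_size(tep, 4096);   /* page size on the traced machine */
            tep_set_cpus(tep, 4);           /* number of CPUs that were traced */
            tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);

            if (tep_is_file_bigendian(tep) != tep_is_local_bigendian(tep))
                    printf("byte swapping is needed when reading numbers\n");

            printf("%d CPUs, %d byte pages, %d byte longs\n",
                   tep_get_cpus(tep), tep_get_page_size(tep),
                   tep_get_long_size(tep));
    }
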
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 35833ee32d6c..09aa142f7fdd 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -92,8 +92,8 @@ struct tep_handle {
void tep_free_event(struct tep_event *event);
void tep_free_format_field(struct tep_format_field *field);
-unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
-unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
-unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
+unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
+unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
+unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data);
#endif /* _PARSE_EVENTS_INT_H */
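
tep_data2host2/4/8() are the internal helpers the parser uses to convert raw values from file byte order to host byte order. Their bodies are not part of this hunk; the sketch below is only a hypothetical illustration of the idea (swap bytes when, and only when, file and host endianness differ), not the libtraceevent implementation:

    #include <stdbool.h>

    /* Hypothetical sketch, not the library code. */
    static unsigned short example_data2host2(bool file_bigendian,
                                             bool host_bigendian,
                                             unsigned short data)
    {
            if (file_bigendian == host_bigendian)
                    return data;                      /* same byte order, use as-is */

            return (data >> 8) | (data << 8);         /* swap the two bytes */
    }
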
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index abd4fa5d3088..b36b536a9fcb 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -148,14 +148,14 @@ struct cmdline_list {
int pid;
};
-static int cmdline_init(struct tep_handle *pevent)
+static int cmdline_init(struct tep_handle *tep)
{
- struct cmdline_list *cmdlist = pevent->cmdlist;
+ struct cmdline_list *cmdlist = tep->cmdlist;
struct cmdline_list *item;
struct tep_cmdline *cmdlines;
int i;
- cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
+ cmdlines = malloc(sizeof(*cmdlines) * tep->cmdline_count);
if (!cmdlines)
return -1;
@@ -169,15 +169,15 @@ static int cmdline_init(struct tep_handle *pevent)
free(item);
}
- qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+ qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- pevent->cmdlines = cmdlines;
- pevent->cmdlist = NULL;
+ tep->cmdlines = cmdlines;
+ tep->cmdlist = NULL;
return 0;
}
-static const char *find_cmdline(struct tep_handle *pevent, int pid)
+static const char *find_cmdline(struct tep_handle *tep, int pid)
{
const struct tep_cmdline *comm;
struct tep_cmdline key;
@@ -185,13 +185,13 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
if (!pid)
return "<idle>";
- if (!pevent->cmdlines && cmdline_init(pevent))
+ if (!tep->cmdlines && cmdline_init(tep))
return "<not enough memory for cmdlines!>";
key.pid = pid;
- comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (comm)
return comm->comm;
@@ -199,32 +199,32 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
}
/**
- * tep_pid_is_registered - return if a pid has a cmdline registered
- * @pevent: handle for the pevent
+ * tep_is_pid_registered - return if a pid has a cmdline registered
+ * @tep: a handle to the trace event parser context
* @pid: The pid to check if it has a cmdline registered with.
*
- * Returns 1 if the pid has a cmdline mapped to it
- * 0 otherwise.
+ * Returns true if the pid has a cmdline mapped to it
+ * false otherwise.
*/
-int tep_pid_is_registered(struct tep_handle *pevent, int pid)
+bool tep_is_pid_registered(struct tep_handle *tep, int pid)
{
const struct tep_cmdline *comm;
struct tep_cmdline key;
if (!pid)
- return 1;
+ return true;
- if (!pevent->cmdlines && cmdline_init(pevent))
- return 0;
+ if (!tep->cmdlines && cmdline_init(tep))
+ return false;
key.pid = pid;
- comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (comm)
- return 1;
- return 0;
+ return true;
+ return false;
}
/*
@@ -232,10 +232,10 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
* we must add this pid. This is much slower than when cmdlines
* are added before the array is initialized.
*/
-static int add_new_comm(struct tep_handle *pevent,
+static int add_new_comm(struct tep_handle *tep,
const char *comm, int pid, bool override)
{
- struct tep_cmdline *cmdlines = pevent->cmdlines;
+ struct tep_cmdline *cmdlines = tep->cmdlines;
struct tep_cmdline *cmdline;
struct tep_cmdline key;
char *new_comm;
@@ -246,8 +246,8 @@ static int add_new_comm(struct tep_handle *pevent,
/* avoid duplicates */
key.pid = pid;
- cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
- sizeof(*pevent->cmdlines), cmdline_cmp);
+ cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count,
+ sizeof(*tep->cmdlines), cmdline_cmp);
if (cmdline) {
if (!override) {
errno = EEXIST;
@@ -264,37 +264,37 @@ static int add_new_comm(struct tep_handle *pevent,
return 0;
}
- cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
+ cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (tep->cmdline_count + 1));
if (!cmdlines) {
errno = ENOMEM;
return -1;
}
- cmdlines[pevent->cmdline_count].comm = strdup(comm);
- if (!cmdlines[pevent->cmdline_count].comm) {
+ cmdlines[tep->cmdline_count].comm = strdup(comm);
+ if (!cmdlines[tep->cmdline_count].comm) {
free(cmdlines);
errno = ENOMEM;
return -1;
}
- cmdlines[pevent->cmdline_count].pid = pid;
+ cmdlines[tep->cmdline_count].pid = pid;
- if (cmdlines[pevent->cmdline_count].comm)
- pevent->cmdline_count++;
+ if (cmdlines[tep->cmdline_count].comm)
+ tep->cmdline_count++;
- qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- pevent->cmdlines = cmdlines;
+ qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
+ tep->cmdlines = cmdlines;
return 0;
}
-static int _tep_register_comm(struct tep_handle *pevent,
+static int _tep_register_comm(struct tep_handle *tep,
const char *comm, int pid, bool override)
{
struct cmdline_list *item;
- if (pevent->cmdlines)
- return add_new_comm(pevent, comm, pid, override);
+ if (tep->cmdlines)
+ return add_new_comm(tep, comm, pid, override);
item = malloc(sizeof(*item));
if (!item)
@@ -309,17 +309,17 @@ static int _tep_register_comm(struct tep_handle *pevent,
return -1;
}
item->pid = pid;
- item->next = pevent->cmdlist;
+ item->next = tep->cmdlist;
- pevent->cmdlist = item;
- pevent->cmdline_count++;
+ tep->cmdlist = item;
+ tep->cmdline_count++;
return 0;
}
/**
* tep_register_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the command line to register
* @pid: the pid to map the command line to
*
@@ -327,14 +327,14 @@ static int _tep_register_comm(struct tep_handle *pevent,
* a given pid. The comm is duplicated. If a command with the same pid
* already exists, -1 is returned and errno is set to EEXIST
*/
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid)
{
- return _tep_register_comm(pevent, comm, pid, false);
+ return _tep_register_comm(tep, comm, pid, false);
}
/**
* tep_override_comm - register a pid / comm mapping
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the command line to register
* @pid: the pid to map the command line to
*
@@ -342,19 +342,19 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
* a given pid. The comm is duplicated. If a command with the same pid
* already exists, the command string is updated with the new one
*/
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid)
{
- if (!pevent->cmdlines && cmdline_init(pevent)) {
+ if (!tep->cmdlines && cmdline_init(tep)) {
errno = ENOMEM;
return -1;
}
- return _tep_register_comm(pevent, comm, pid, true);
+ return _tep_register_comm(tep, comm, pid, true);
}
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock)
{
- pevent->trace_clock = strdup(trace_clock);
- if (!pevent->trace_clock) {
+ tep->trace_clock = strdup(trace_clock);
+ if (!tep->trace_clock) {
errno = ENOMEM;
return -1;
}
@@ -408,18 +408,18 @@ static int func_bcmp(const void *a, const void *b)
return 1;
}
-static int func_map_init(struct tep_handle *pevent)
+static int func_map_init(struct tep_handle *tep)
{
struct func_list *funclist;
struct func_list *item;
struct func_map *func_map;
int i;
- func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1));
+ func_map = malloc(sizeof(*func_map) * (tep->func_count + 1));
if (!func_map)
return -1;
- funclist = pevent->funclist;
+ funclist = tep->funclist;
i = 0;
while (funclist) {
@@ -432,34 +432,34 @@ static int func_map_init(struct tep_handle *pevent)
free(item);
}
- qsort(func_map, pevent->func_count, sizeof(*func_map), func_cmp);
+ qsort(func_map, tep->func_count, sizeof(*func_map), func_cmp);
/*
* Add a special record at the end.
*/
- func_map[pevent->func_count].func = NULL;
- func_map[pevent->func_count].addr = 0;
- func_map[pevent->func_count].mod = NULL;
+ func_map[tep->func_count].func = NULL;
+ func_map[tep->func_count].addr = 0;
+ func_map[tep->func_count].mod = NULL;
- pevent->func_map = func_map;
- pevent->funclist = NULL;
+ tep->func_map = func_map;
+ tep->funclist = NULL;
return 0;
}
static struct func_map *
-__find_func(struct tep_handle *pevent, unsigned long long addr)
+__find_func(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *func;
struct func_map key;
- if (!pevent->func_map)
- func_map_init(pevent);
+ if (!tep->func_map)
+ func_map_init(tep);
key.addr = addr;
- func = bsearch(&key, pevent->func_map, pevent->func_count,
- sizeof(*pevent->func_map), func_bcmp);
+ func = bsearch(&key, tep->func_map, tep->func_count,
+ sizeof(*tep->func_map), func_bcmp);
return func;
}
@@ -472,15 +472,14 @@ struct func_resolver {
/**
* tep_set_function_resolver - set an alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @resolver: function to be used
* @priv: resolver function private state.
*
* Some tools may already have a way to resolve kernel functions; allow them to
- * keep using it instead of duplicating all the entries inside
- * pevent->funclist.
+ * keep using it instead of duplicating all the entries inside tep->funclist.
*/
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
tep_func_resolver_t *func, void *priv)
{
struct func_resolver *resolver = malloc(sizeof(*resolver));
@@ -491,38 +490,38 @@ int tep_set_function_resolver(struct tep_handle *pevent,
resolver->func = func;
resolver->priv = priv;
- free(pevent->func_resolver);
- pevent->func_resolver = resolver;
+ free(tep->func_resolver);
+ tep->func_resolver = resolver;
return 0;
}
/**
* tep_reset_function_resolver - reset alternative function resolver
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* Stop using whatever alternative resolver was set, use the default
* one instead.
*/
-void tep_reset_function_resolver(struct tep_handle *pevent)
+void tep_reset_function_resolver(struct tep_handle *tep)
{
- free(pevent->func_resolver);
- pevent->func_resolver = NULL;
+ free(tep->func_resolver);
+ tep->func_resolver = NULL;
}
static struct func_map *
-find_func(struct tep_handle *pevent, unsigned long long addr)
+find_func(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- if (!pevent->func_resolver)
- return __find_func(pevent, addr);
+ if (!tep->func_resolver)
+ return __find_func(tep, addr);
- map = &pevent->func_resolver->map;
+ map = &tep->func_resolver->map;
map->mod = NULL;
map->addr = addr;
- map->func = pevent->func_resolver->func(pevent->func_resolver->priv,
- &map->addr, &map->mod);
+ map->func = tep->func_resolver->func(tep->func_resolver->priv,
+ &map->addr, &map->mod);
if (map->func == NULL)
return NULL;
@@ -531,18 +530,18 @@ find_func(struct tep_handle *pevent, unsigned long long addr)
/**
* tep_find_function - find a function by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @addr: the address to find the function with
*
* Returns a pointer to the function stored that has the given
* address. Note, the address does not have to be exact, it
* will select the function that would contain the address.
*/
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr)
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- map = find_func(pevent, addr);
+ map = find_func(tep, addr);
if (!map)
return NULL;
@@ -551,7 +550,7 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
/**
* tep_find_function_address - find a function address by a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @addr: the address to find the function with
*
* Returns the address the function starts at. This can be used in
@@ -559,11 +558,11 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr
* name and the function offset.
*/
unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr)
{
struct func_map *map;
- map = find_func(pevent, addr);
+ map = find_func(tep, addr);
if (!map)
return 0;
@@ -572,7 +571,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
/**
* tep_register_function - register a function with a given address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @function: the function name to register
* @addr: the address the function starts at
* @mod: the kernel module the function may be in (NULL for none)
@@ -580,7 +579,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
* This registers a function name with an address and module.
* The @func passed in is duplicated.
*/
-int tep_register_function(struct tep_handle *pevent, char *func,
+int tep_register_function(struct tep_handle *tep, char *func,
unsigned long long addr, char *mod)
{
struct func_list *item = malloc(sizeof(*item));
@@ -588,7 +587,7 @@ int tep_register_function(struct tep_handle *pevent, char *func,
if (!item)
return -1;
- item->next = pevent->funclist;
+ item->next = tep->funclist;
item->func = strdup(func);
if (!item->func)
goto out_free;
@@ -601,8 +600,8 @@ int tep_register_function(struct tep_handle *pevent, char *func,
item->mod = NULL;
item->addr = addr;
- pevent->funclist = item;
- pevent->func_count++;
+ tep->funclist = item;
+ tep->func_count++;
return 0;
@@ -617,23 +616,23 @@ out_free:
/**
* tep_print_funcs - print out the stored functions
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* This prints out the stored functions.
*/
-void tep_print_funcs(struct tep_handle *pevent)
+void tep_print_funcs(struct tep_handle *tep)
{
int i;
- if (!pevent->func_map)
- func_map_init(pevent);
+ if (!tep->func_map)
+ func_map_init(tep);
- for (i = 0; i < (int)pevent->func_count; i++) {
+ for (i = 0; i < (int)tep->func_count; i++) {
printf("%016llx %s",
- pevent->func_map[i].addr,
- pevent->func_map[i].func);
- if (pevent->func_map[i].mod)
- printf(" [%s]\n", pevent->func_map[i].mod);
+ tep->func_map[i].addr,
+ tep->func_map[i].func);
+ if (tep->func_map[i].mod)
+ printf(" [%s]\n", tep->func_map[i].mod);
else
printf("\n");
}
@@ -663,18 +662,18 @@ static int printk_cmp(const void *a, const void *b)
return 0;
}
-static int printk_map_init(struct tep_handle *pevent)
+static int printk_map_init(struct tep_handle *tep)
{
struct printk_list *printklist;
struct printk_list *item;
struct printk_map *printk_map;
int i;
- printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1));
+ printk_map = malloc(sizeof(*printk_map) * (tep->printk_count + 1));
if (!printk_map)
return -1;
- printklist = pevent->printklist;
+ printklist = tep->printklist;
i = 0;
while (printklist) {
@@ -686,41 +685,41 @@ static int printk_map_init(struct tep_handle *pevent)
free(item);
}
- qsort(printk_map, pevent->printk_count, sizeof(*printk_map), printk_cmp);
+ qsort(printk_map, tep->printk_count, sizeof(*printk_map), printk_cmp);
- pevent->printk_map = printk_map;
- pevent->printklist = NULL;
+ tep->printk_map = printk_map;
+ tep->printklist = NULL;
return 0;
}
static struct printk_map *
-find_printk(struct tep_handle *pevent, unsigned long long addr)
+find_printk(struct tep_handle *tep, unsigned long long addr)
{
struct printk_map *printk;
struct printk_map key;
- if (!pevent->printk_map && printk_map_init(pevent))
+ if (!tep->printk_map && printk_map_init(tep))
return NULL;
key.addr = addr;
- printk = bsearch(&key, pevent->printk_map, pevent->printk_count,
- sizeof(*pevent->printk_map), printk_cmp);
+ printk = bsearch(&key, tep->printk_map, tep->printk_count,
+ sizeof(*tep->printk_map), printk_cmp);
return printk;
}
/**
* tep_register_print_string - register a string by its address
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @fmt: the string format to register
* @addr: the address the string was located at
*
* This registers a string by the address it was stored in the kernel.
* The @fmt passed in is duplicated.
*/
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr)
{
struct printk_list *item = malloc(sizeof(*item));
@@ -729,7 +728,7 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
if (!item)
return -1;
- item->next = pevent->printklist;
+ item->next = tep->printklist;
item->addr = addr;
/* Strip off quotes and '\n' from the end */
@@ -747,8 +746,8 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
if (strcmp(p, "\\n") == 0)
*p = 0;
- pevent->printklist = item;
- pevent->printk_count++;
+ tep->printklist = item;
+ tep->printk_count++;
return 0;
@@ -760,21 +759,21 @@ out_free:
/**
* tep_print_printk - print out the stored strings
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
*
* This prints the string formats that were stored.
*/
-void tep_print_printk(struct tep_handle *pevent)
+void tep_print_printk(struct tep_handle *tep)
{
int i;
- if (!pevent->printk_map)
- printk_map_init(pevent);
+ if (!tep->printk_map)
+ printk_map_init(tep);
- for (i = 0; i < (int)pevent->printk_count; i++) {
+ for (i = 0; i < (int)tep->printk_count; i++) {
printf("%016llx %s\n",
- pevent->printk_map[i].addr,
- pevent->printk_map[i].printk);
+ tep->printk_map[i].addr,
+ tep->printk_map[i].printk);
}
}
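
tep_register_print_string() and tep_print_printk() work as a pair: the first records the address-to-format mappings read from the trace file's printk_formats section, the second dumps what was recorded. A brief sketch; the address and format string below are invented for illustration:

    /* Illustrative only: values here do not come from a real trace file. */
    static void record_printk_strings(struct tep_handle *tep)
    {
            /* as read from printk_formats: quotes and trailing \n are stripped */
            tep_register_print_string(tep, "\"hello trace\\n\"",
                                      0xffffffff81a2b100ULL);

            /* print every registered address/format pair */
            tep_print_printk(tep);
    }
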
@@ -783,29 +782,29 @@ static struct tep_event *alloc_event(void)
return calloc(1, sizeof(struct tep_event));
}
-static int add_event(struct tep_handle *pevent, struct tep_event *event)
+static int add_event(struct tep_handle *tep, struct tep_event *event)
{
int i;
- struct tep_event **events = realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
+ struct tep_event **events = realloc(tep->events, sizeof(event) *
+ (tep->nr_events + 1));
if (!events)
return -1;
- pevent->events = events;
+ tep->events = events;
- for (i = 0; i < pevent->nr_events; i++) {
- if (pevent->events[i]->id > event->id)
+ for (i = 0; i < tep->nr_events; i++) {
+ if (tep->events[i]->id > event->id)
break;
}
- if (i < pevent->nr_events)
- memmove(&pevent->events[i + 1],
- &pevent->events[i],
- sizeof(event) * (pevent->nr_events - i));
+ if (i < tep->nr_events)
+ memmove(&tep->events[i + 1],
+ &tep->events[i],
+ sizeof(event) * (tep->nr_events - i));
- pevent->events[i] = event;
- pevent->nr_events++;
+ tep->events[i] = event;
+ tep->nr_events++;
- event->pevent = pevent;
+ event->tep = tep;
return 0;
}
@@ -1184,7 +1183,7 @@ static enum tep_event_type read_token(char **tok)
}
/**
- * tep_read_token - access to utilities to use the pevent parser
+ * tep_read_token - access to utilities to use the tep parser
* @tok: The token to return
*
* This will parse tokens from the string given by
@@ -1657,8 +1656,8 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
else if (field->flags & TEP_FIELD_IS_STRING)
field->elementsize = 1;
else if (field->flags & TEP_FIELD_IS_LONG)
- field->elementsize = event->pevent ?
- event->pevent->long_size :
+ field->elementsize = event->tep ?
+ event->tep->long_size :
sizeof(long);
} else
field->elementsize = field->size;
@@ -2233,7 +2232,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
return val & 0xffffffff;
if (strcmp(type, "u64") == 0 ||
- strcmp(type, "s64"))
+ strcmp(type, "s64") == 0)
return val;
if (strcmp(type, "s8") == 0)
@@ -2457,7 +2456,7 @@ static int arg_num_eval(struct tep_print_arg *arg, long long *val)
static char *arg_eval (struct tep_print_arg *arg)
{
long long val;
- static char buf[20];
+ static char buf[24];
switch (arg->type) {
case TEP_PRINT_ATOM:
@@ -2942,14 +2941,14 @@ process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *ar
}
static struct tep_function_handler *
-find_func_handler(struct tep_handle *pevent, char *func_name)
+find_func_handler(struct tep_handle *tep, char *func_name)
{
struct tep_function_handler *func;
- if (!pevent)
+ if (!tep)
return NULL;
- for (func = pevent->func_handlers; func; func = func->next) {
+ for (func = tep->func_handlers; func; func = func->next) {
if (strcmp(func->name, func_name) == 0)
break;
}
@@ -2957,12 +2956,12 @@ find_func_handler(struct tep_handle *pevent, char *func_name)
return func;
}
-static void remove_func_handler(struct tep_handle *pevent, char *func_name)
+static void remove_func_handler(struct tep_handle *tep, char *func_name)
{
struct tep_function_handler *func;
struct tep_function_handler **next;
- next = &pevent->func_handlers;
+ next = &tep->func_handlers;
while ((func = *next)) {
if (strcmp(func->name, func_name) == 0) {
*next = func->next;
@@ -3076,7 +3075,7 @@ process_function(struct tep_event *event, struct tep_print_arg *arg,
return process_dynamic_array_len(event, arg, tok);
}
- func = find_func_handler(event->pevent, token);
+ func = find_func_handler(event->tep, token);
if (func) {
free_token(token);
return process_func_handler(event, func, arg, tok);
@@ -3357,14 +3356,14 @@ tep_find_any_field(struct tep_event *event, const char *name)
/**
* tep_read_number - read a number from data
- * @pevent: handle for the pevent
+ * @tep: a handle to the trace event parser context
* @ptr: the raw data
* @size: the size of the data that holds the number
*
* Returns the number (converted to host) from the
* raw data.
*/
-unsigned long long tep_read_number(struct tep_handle *pevent,
+unsigned long long tep_read_number(struct tep_handle *tep,
const void *ptr, int size)
{
unsigned long long val;
@@ -3373,12 +3372,12 @@ unsigned long long tep_read_number(struct tep_handle *pevent,
case 1:
return *(unsigned char *)ptr;
case 2:
- return tep_data2host2(pevent, *(unsigned short *)ptr);
+ return tep_data2host2(tep, *(unsigned short *)ptr);
case 4:
- return tep_data2host4(pevent, *(unsigned int *)ptr);
+ return tep_data2host4(tep, *(unsigned int *)ptr);
case 8:
memcpy(&val, (ptr), sizeof(unsigned long long));
- return tep_data2host8(pevent, val);
+ return tep_data2host8(tep, val);
default:
/* BUG! */
return 0;
@@ -3406,7 +3405,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
case 2:
case 4:
case 8:
- *value = tep_read_number(field->event->pevent,
+ *value = tep_read_number(field->event->tep,
data + field->offset, field->size);
return 0;
default:
@@ -3414,7 +3413,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
}
}
-static int get_common_info(struct tep_handle *pevent,
+static int get_common_info(struct tep_handle *tep,
const char *type, int *offset, int *size)
{
struct tep_event *event;
@@ -3424,12 +3423,12 @@ static int get_common_info(struct tep_handle *pevent,
* All events should have the same common elements.
* Pick any event to find where the type is;
*/
- if (!pevent->events) {
+ if (!tep->events) {
do_warning("no event_list!");
return -1;
}
- event = pevent->events[0];
+ event = tep->events[0];
field = tep_find_common_field(event, type);
if (!field)
return -1;
@@ -3440,58 +3439,58 @@ static int get_common_info(struct tep_handle *pevent,
return 0;
}
-static int __parse_common(struct tep_handle *pevent, void *data,
+static int __parse_common(struct tep_handle *tep, void *data,
int *size, int *offset, const char *name)
{
int ret;
if (!*size) {
- ret = get_common_info(pevent, name, offset, size);
+ ret = get_common_info(tep, name, offset, size);
if (ret < 0)
return ret;
}
- return tep_read_number(pevent, data + *offset, *size);
+ return tep_read_number(tep, data + *offset, *size);
}
-static int trace_parse_common_type(struct tep_handle *pevent, void *data)
+static int trace_parse_common_type(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->type_size, &pevent->type_offset,
+ return __parse_common(tep, data,
+ &tep->type_size, &tep->type_offset,
"common_type");
}
-static int parse_common_pid(struct tep_handle *pevent, void *data)
+static int parse_common_pid(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->pid_size, &pevent->pid_offset,
+ return __parse_common(tep, data,
+ &tep->pid_size, &tep->pid_offset,
"common_pid");
}
-static int parse_common_pc(struct tep_handle *pevent, void *data)
+static int parse_common_pc(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->pc_size, &pevent->pc_offset,
+ return __parse_common(tep, data,
+ &tep->pc_size, &tep->pc_offset,
"common_preempt_count");
}
-static int parse_common_flags(struct tep_handle *pevent, void *data)
+static int parse_common_flags(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->flags_size, &pevent->flags_offset,
+ return __parse_common(tep, data,
+ &tep->flags_size, &tep->flags_offset,
"common_flags");
}
-static int parse_common_lock_depth(struct tep_handle *pevent, void *data)
+static int parse_common_lock_depth(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->ld_size, &pevent->ld_offset,
+ return __parse_common(tep, data,
+ &tep->ld_size, &tep->ld_offset,
"common_lock_depth");
}
-static int parse_common_migrate_disable(struct tep_handle *pevent, void *data)
+static int parse_common_migrate_disable(struct tep_handle *tep, void *data)
{
- return __parse_common(pevent, data,
- &pevent->ld_size, &pevent->ld_offset,
+ return __parse_common(tep, data,
+ &tep->ld_size, &tep->ld_offset,
"common_migrate_disable");
}
@@ -3499,28 +3498,28 @@ static int events_id_cmp(const void *a, const void *b);
/**
* tep_find_event - find an event by given id
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event
*
* Returns an event that has a given @id.
*/
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
+struct tep_event *tep_find_event(struct tep_handle *tep, int id)
{
struct tep_event **eventptr;
struct tep_event key;
struct tep_event *pkey = &key;
/* Check cache first */
- if (pevent->last_event && pevent->last_event->id == id)
- return pevent->last_event;
+ if (tep->last_event && tep->last_event->id == id)
+ return tep->last_event;
key.id = id;
- eventptr = bsearch(&pkey, pevent->events, pevent->nr_events,
- sizeof(*pevent->events), events_id_cmp);
+ eventptr = bsearch(&pkey, tep->events, tep->nr_events,
+ sizeof(*tep->events), events_id_cmp);
if (eventptr) {
- pevent->last_event = *eventptr;
+ tep->last_event = *eventptr;
return *eventptr;
}
@@ -3529,7 +3528,7 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
/**
* tep_find_event_by_name - find an event by given name
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @sys: the system name to search for
* @name: the name of the event to search for
*
@@ -3537,19 +3536,19 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
* @sys. If @sys is NULL the first event with @name is returned.
*/
struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent,
+tep_find_event_by_name(struct tep_handle *tep,
const char *sys, const char *name)
{
struct tep_event *event = NULL;
int i;
- if (pevent->last_event &&
- strcmp(pevent->last_event->name, name) == 0 &&
- (!sys || strcmp(pevent->last_event->system, sys) == 0))
- return pevent->last_event;
+ if (tep->last_event &&
+ strcmp(tep->last_event->name, name) == 0 &&
+ (!sys || strcmp(tep->last_event->system, sys) == 0))
+ return tep->last_event;
- for (i = 0; i < pevent->nr_events; i++) {
- event = pevent->events[i];
+ for (i = 0; i < tep->nr_events; i++) {
+ event = tep->events[i];
if (strcmp(event->name, name) == 0) {
if (!sys)
break;
@@ -3557,17 +3556,17 @@ tep_find_event_by_name(struct tep_handle *pevent,
break;
}
}
- if (i == pevent->nr_events)
+ if (i == tep->nr_events)
event = NULL;
- pevent->last_event = event;
+ tep->last_event = event;
return event;
}
static unsigned long long
eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long val = 0;
unsigned long long left, right;
struct tep_print_arg *typearg = NULL;
@@ -3589,7 +3588,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
/* must be a number */
- val = tep_read_number(pevent, data + arg->field.field->offset,
+ val = tep_read_number(tep, data + arg->field.field->offset,
arg->field.field->size);
break;
case TEP_PRINT_FLAGS:
@@ -3629,11 +3628,11 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
/* Default to long size */
- field_size = pevent->long_size;
+ field_size = tep->long_size;
switch (larg->type) {
case TEP_PRINT_DYNAMIC_ARRAY:
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + larg->dynarray.field->offset,
larg->dynarray.field->size);
if (larg->dynarray.field->elementsize)
@@ -3662,7 +3661,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
default:
goto default_op; /* oops, all bets off */
}
- val = tep_read_number(pevent,
+ val = tep_read_number(tep,
data + offset, field_size);
if (typearg)
val = eval_type(val, typearg, 1);
@@ -3763,7 +3762,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
}
break;
case TEP_PRINT_DYNAMIC_ARRAY_LEN:
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
/*
@@ -3775,7 +3774,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg
break;
case TEP_PRINT_DYNAMIC_ARRAY:
/* Without [], we pass the address to the dynamic data */
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->dynarray.field->offset,
arg->dynarray.field->size);
/*
@@ -3850,7 +3849,7 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
trace_seq_printf(s, format, str);
}
-static void print_bitmask_to_seq(struct tep_handle *pevent,
+static void print_bitmask_to_seq(struct tep_handle *tep,
struct trace_seq *s, const char *format,
int len_arg, const void *data, int size)
{
@@ -3882,7 +3881,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
* In the kernel, this is an array of long words, thus
* endianness is very important.
*/
- if (pevent->file_bigendian)
+ if (tep->file_bigendian)
index = size - (len + 1);
else
index = len;
@@ -3908,7 +3907,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
struct tep_event *event, const char *format,
int len_arg, struct tep_print_arg *arg)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_print_flag_sym *flag;
struct tep_format_field *field;
struct printk_map *printk;
@@ -3945,7 +3944,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
* is a pointer.
*/
if (!(field->flags & TEP_FIELD_IS_ARRAY) &&
- field->size == pevent->long_size) {
+ field->size == tep->long_size) {
/* Handle heterogeneous recording and processing
* architectures
@@ -3960,12 +3959,12 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
* on 32-bit devices:
* In this case, 64 bits must be read.
*/
- addr = (pevent->long_size == 8) ?
+ addr = (tep->long_size == 8) ?
*(unsigned long long *)(data + field->offset) :
(unsigned long long)*(unsigned int *)(data + field->offset);
/* Check if it matches a print format */
- printk = find_printk(pevent, addr);
+ printk = find_printk(tep, addr);
if (printk)
trace_seq_puts(s, printk->printk);
else
@@ -4022,7 +4021,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
case TEP_PRINT_HEX_STR:
if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + arg->hex.field->dynarray.field->offset,
arg->hex.field->dynarray.field->size);
hex = data + (offset & 0xffff);
@@ -4053,7 +4052,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
unsigned long offset;
struct tep_format_field *field =
arg->int_array.field->dynarray.field;
- offset = tep_read_number(pevent,
+ offset = tep_read_number(tep,
data + field->offset,
field->size);
num = data + (offset & 0xffff);
@@ -4104,7 +4103,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->string.string);
arg->string.offset = f->offset;
}
- str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset));
+ str_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->string.offset));
str_offset &= 0xffff;
print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
break;
@@ -4122,10 +4121,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->bitmask.bitmask);
arg->bitmask.offset = f->offset;
}
- bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset));
+ bitmask_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->bitmask.offset));
bitmask_size = bitmask_offset >> 16;
bitmask_offset &= 0xffff;
- print_bitmask_to_seq(pevent, s, format, len_arg,
+ print_bitmask_to_seq(tep, s, format, len_arg,
data + bitmask_offset, bitmask_size);
break;
}
@@ -4257,7 +4256,7 @@ static void free_args(struct tep_print_arg *args)
static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_format_field *field, *ip_field;
struct tep_print_arg *args, *arg, **next;
unsigned long long ip, val;
@@ -4265,8 +4264,8 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
void *bptr;
int vsize = 0;
- field = pevent->bprint_buf_field;
- ip_field = pevent->bprint_ip_field;
+ field = tep->bprint_buf_field;
+ ip_field = tep->bprint_ip_field;
if (!field) {
field = tep_find_field(event, "buf");
@@ -4279,11 +4278,11 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
do_warning_event(event, "can't find ip field for binary printk");
return NULL;
}
- pevent->bprint_buf_field = field;
- pevent->bprint_ip_field = ip_field;
+ tep->bprint_buf_field = field;
+ tep->bprint_ip_field = ip_field;
}
- ip = tep_read_number(pevent, data + ip_field->offset, ip_field->size);
+ ip = tep_read_number(tep, data + ip_field->offset, ip_field->size);
/*
* The first arg is the IP pointer.
@@ -4338,6 +4337,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
case 'S':
case 'f':
case 'F':
+ case 'x':
break;
default:
/*
@@ -4360,7 +4360,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
vsize = 4;
break;
case 1:
- vsize = pevent->long_size;
+ vsize = tep->long_size;
break;
case 2:
vsize = 8;
@@ -4377,7 +4377,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
/* the pointers are always 4 bytes aligned */
bptr = (void *)(((unsigned long)bptr + 3) &
~3);
- val = tep_read_number(pevent, bptr, vsize);
+ val = tep_read_number(tep, bptr, vsize);
bptr += vsize;
arg = alloc_arg();
if (!arg) {
@@ -4434,13 +4434,13 @@ static char *
get_bprint_format(void *data, int size __maybe_unused,
struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long addr;
struct tep_format_field *field;
struct printk_map *printk;
char *format;
- field = pevent->bprint_fmt_field;
+ field = tep->bprint_fmt_field;
if (!field) {
field = tep_find_field(event, "fmt");
@@ -4448,12 +4448,12 @@ get_bprint_format(void *data, int size __maybe_unused,
do_warning_event(event, "can't find format field for binary printk");
return NULL;
}
- pevent->bprint_fmt_field = field;
+ tep->bprint_fmt_field = field;
}
- addr = tep_read_number(pevent, data + field->offset, field->size);
+ addr = tep_read_number(tep, data + field->offset, field->size);
- printk = find_printk(pevent, addr);
+ printk = find_printk(tep, addr);
if (!printk) {
if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0)
return NULL;
@@ -4835,13 +4835,13 @@ void tep_print_field(struct trace_seq *s, void *data,
{
unsigned long long val;
unsigned int offset, len, i;
- struct tep_handle *pevent = field->event->pevent;
+ struct tep_handle *tep = field->event->tep;
if (field->flags & TEP_FIELD_IS_ARRAY) {
offset = field->offset;
len = field->size;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- val = tep_read_number(pevent, data + offset, len);
+ val = tep_read_number(tep, data + offset, len);
offset = val;
len = offset >> 16;
offset &= 0xffff;
@@ -4861,7 +4861,7 @@ void tep_print_field(struct trace_seq *s, void *data,
field->flags &= ~TEP_FIELD_IS_STRING;
}
} else {
- val = tep_read_number(pevent, data + field->offset,
+ val = tep_read_number(tep, data + field->offset,
field->size);
if (field->flags & TEP_FIELD_IS_POINTER) {
trace_seq_printf(s, "0x%llx", val);
@@ -4910,7 +4910,7 @@ void tep_print_fields(struct trace_seq *s, void *data,
static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
struct tep_print_fmt *print_fmt = &event->print_fmt;
struct tep_print_arg *arg = print_fmt->args;
struct tep_print_arg *args = NULL;
@@ -5002,7 +5002,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
case '-':
goto cont_process;
case 'p':
- if (pevent->long_size == 4)
+ if (tep->long_size == 4)
ls = 1;
else
ls = 2;
@@ -5063,7 +5063,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
arg = arg->next;
if (show_func) {
- func = find_func(pevent, val);
+ func = find_func(tep, val);
if (func) {
trace_seq_puts(s, func->func);
if (show_func == 'F')
@@ -5073,7 +5073,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
break;
}
}
- if (pevent->long_size == 8 && ls == 1 &&
+ if (tep->long_size == 8 && ls == 1 &&
sizeof(long) != 8) {
char *p;
@@ -5171,8 +5171,8 @@ out_failed:
}
/**
- * tep_data_lat_fmt - parse the data for the latency format
- * @pevent: a handle to the pevent
+ * tep_data_latency_format - parse the data for the latency format
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @record: the record to read from
*
@@ -5180,8 +5180,8 @@ out_failed:
* need rescheduling, in hard/soft interrupt, preempt count
* and lock depth) and places it into the trace_seq.
*/
-void tep_data_lat_fmt(struct tep_handle *pevent,
- struct trace_seq *s, struct tep_record *record)
+void tep_data_latency_format(struct tep_handle *tep,
+ struct trace_seq *s, struct tep_record *record)
{
static int check_lock_depth = 1;
static int check_migrate_disable = 1;
@@ -5195,13 +5195,13 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
int softirq;
void *data = record->data;
- lat_flags = parse_common_flags(pevent, data);
- pc = parse_common_pc(pevent, data);
+ lat_flags = parse_common_flags(tep, data);
+ pc = parse_common_pc(tep, data);
/* lock_depth may not always exist */
if (lock_depth_exists)
- lock_depth = parse_common_lock_depth(pevent, data);
+ lock_depth = parse_common_lock_depth(tep, data);
else if (check_lock_depth) {
- lock_depth = parse_common_lock_depth(pevent, data);
+ lock_depth = parse_common_lock_depth(tep, data);
if (lock_depth < 0)
check_lock_depth = 0;
else
@@ -5210,9 +5210,9 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
/* migrate_disable may not always exist */
if (migrate_disable_exists)
- migrate_disable = parse_common_migrate_disable(pevent, data);
+ migrate_disable = parse_common_migrate_disable(tep, data);
else if (check_migrate_disable) {
- migrate_disable = parse_common_migrate_disable(pevent, data);
+ migrate_disable = parse_common_migrate_disable(tep, data);
if (migrate_disable < 0)
check_migrate_disable = 0;
else
@@ -5255,79 +5255,79 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
/**
* tep_data_type - parse out the given event type
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to read from
*
* This returns the event id from the @rec.
*/
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec)
{
- return trace_parse_common_type(pevent, rec->data);
+ return trace_parse_common_type(tep, rec->data);
}
/**
* tep_data_pid - parse the PID from record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the PID from a record.
*/
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_pid(pevent, rec->data);
+ return parse_common_pid(tep, rec->data);
}
/**
* tep_data_preempt_count - parse the preempt count from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the preempt count from a record.
*/
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_pc(pevent, rec->data);
+ return parse_common_pc(tep, rec->data);
}
/**
* tep_data_flags - parse the latency flags from the record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @rec: the record to parse
*
* This returns the latency flags from a record.
*
* Use trace_flag_type enum for the flags (see event-parse.h).
*/
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec)
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec)
{
- return parse_common_flags(pevent, rec->data);
+ return parse_common_flags(tep, rec->data);
}
/**
* tep_data_comm_from_pid - return the command line from PID
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @pid: the PID of the task to search for
*
* This returns a pointer to the command line that has the given
* @pid.
*/
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid)
{
const char *comm;
- comm = find_cmdline(pevent, pid);
+ comm = find_cmdline(tep, pid);
return comm;
}
static struct tep_cmdline *
-pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
+pid_from_cmdlist(struct tep_handle *tep, const char *comm, struct tep_cmdline *next)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)next;
if (cmdlist)
cmdlist = cmdlist->next;
else
- cmdlist = pevent->cmdlist;
+ cmdlist = tep->cmdlist;
while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
cmdlist = cmdlist->next;
@@ -5337,7 +5337,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
/**
* tep_data_pid_from_comm - return the pid from a given comm
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @comm: the cmdline to find the pid from
* @next: the cmdline structure to find the next comm
*
@@ -5348,7 +5348,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline
* next pid.
* Also, it does a linear search, so it may be slow.
*/
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
struct tep_cmdline *next)
{
struct tep_cmdline *cmdline;
@@ -5357,25 +5357,25 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
* If the cmdlines have not been converted yet, then use
* the list.
*/
- if (!pevent->cmdlines)
- return pid_from_cmdlist(pevent, comm, next);
+ if (!tep->cmdlines)
+ return pid_from_cmdlist(tep, comm, next);
if (next) {
/*
* The next pointer could still be from a previous
* call, before cmdlines were created
*/
- if (next < pevent->cmdlines ||
- next >= pevent->cmdlines + pevent->cmdline_count)
+ if (next < tep->cmdlines ||
+ next >= tep->cmdlines + tep->cmdline_count)
next = NULL;
else
cmdline = next++;
}
if (!next)
- cmdline = pevent->cmdlines;
+ cmdline = tep->cmdlines;
- while (cmdline < pevent->cmdlines + pevent->cmdline_count) {
+ while (cmdline < tep->cmdlines + tep->cmdline_count) {
if (strcmp(cmdline->comm, comm) == 0)
return cmdline;
cmdline++;
@@ -5385,12 +5385,13 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char
/**
* tep_cmdline_pid - return the pid associated to a given cmdline
+ * @tep: a handle to the trace event parser context
* @cmdline: The cmdline structure to get the pid from
*
* Returns the pid for a given cmdline. If @cmdline is NULL, then
* -1 is returned.
*/
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline)
{
struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
@@ -5401,9 +5402,9 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
* If cmdlines have not been created yet, or cmdline is
* not part of the array, then treat it as a cmdlist instead.
*/
- if (!pevent->cmdlines ||
- cmdline < pevent->cmdlines ||
- cmdline >= pevent->cmdlines + pevent->cmdline_count)
+ if (!tep->cmdlines ||
+ cmdline < tep->cmdlines ||
+ cmdline >= tep->cmdlines + tep->cmdline_count)
return cmdlist->pid;
return cmdline->pid;
@@ -5423,7 +5424,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event *event,
{
int print_pretty = 1;
- if (event->pevent->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
+ if (event->tep->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
tep_print_fields(s, record->data, record->size, event);
else {
@@ -5444,7 +5445,8 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
return true;
if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
- || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf"))
+ || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")
+ || !strncmp(trace_clock, "mono", 4))
return true;
/* trace_clock is setting in tsc or counter mode */
@@ -5453,14 +5455,14 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
/**
* tep_find_event_by_record - return the event from a given record
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @record: The record to get the event from
*
* Returns the associated event for a given record, or NULL if none
* is found.
*/
struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record)
{
int type;
@@ -5469,21 +5471,21 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
return NULL;
}
- type = trace_parse_common_type(pevent, record->data);
+ type = trace_parse_common_type(tep, record->data);
- return tep_find_event(pevent, type);
+ return tep_find_event(tep, type);
}
/**
* tep_print_event_task - Write the event task comm, pid and CPU
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
*
* Writes the tasks comm, pid and CPU to @s.
*/
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record)
{
@@ -5491,27 +5493,26 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
const char *comm;
int pid;
- pid = parse_common_pid(pevent, data);
- comm = find_cmdline(pevent, pid);
+ pid = parse_common_pid(tep, data);
+ comm = find_cmdline(tep, pid);
- if (pevent->latency_format) {
- trace_seq_printf(s, "%8.8s-%-5d %3d",
- comm, pid, record->cpu);
- } else
+ if (tep->latency_format)
+ trace_seq_printf(s, "%8.8s-%-5d %3d", comm, pid, record->cpu);
+ else
trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu);
}
/**
* tep_print_event_time - Write the event timestamp
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
- * @use_trace_clock: Set to parse according to the @pevent->trace_clock
+ * @use_trace_clock: Set to parse according to the @tep->trace_clock
*
* Writes the timestamp of the record into @s.
*/
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record,
bool use_trace_clock)
@@ -5522,19 +5523,18 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
int p;
bool use_usec_format;
- use_usec_format = is_timestamp_in_us(pevent->trace_clock,
- use_trace_clock);
+ use_usec_format = is_timestamp_in_us(tep->trace_clock, use_trace_clock);
if (use_usec_format) {
secs = record->ts / NSEC_PER_SEC;
nsecs = record->ts - secs * NSEC_PER_SEC;
}
- if (pevent->latency_format) {
- tep_data_lat_fmt(pevent, s, record);
+ if (tep->latency_format) {
+ tep_data_latency_format(tep, s, record);
}
if (use_usec_format) {
- if (pevent->flags & TEP_NSEC_OUTPUT) {
+ if (tep->flags & TEP_NSEC_OUTPUT) {
usecs = nsecs;
p = 9;
} else {
@@ -5554,14 +5554,14 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
/**
* tep_print_event_data - Write the event data section
- * @pevent: a handle to the pevent
+ * @tep: a handle to the trace event parser context
* @s: the trace_seq to write to
* @event: the handle to the record's event
* @record: The record to get the event from
*
* Writes the parsing of the record's data to @s.
*/
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record)
{
@@ -5578,15 +5578,15 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
tep_event_info(s, event, record);
}
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock)
{
struct tep_event *event;
- event = tep_find_event_by_record(pevent, record);
+ event = tep_find_event_by_record(tep, record);
if (!event) {
int i;
- int type = trace_parse_common_type(pevent, record->data);
+ int type = trace_parse_common_type(tep, record->data);
do_warning("ug! no event found for type %d", type);
trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
@@ -5596,9 +5596,9 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
return;
}
- tep_print_event_task(pevent, s, event, record);
- tep_print_event_time(pevent, s, event, record, use_trace_clock);
- tep_print_event_data(pevent, s, event, record);
+ tep_print_event_task(tep, s, event, record);
+ tep_print_event_time(tep, s, event, record, use_trace_clock);
+ tep_print_event_data(tep, s, event, record);
}
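A minimal usage sketch of the renamed print API (not taken from this patch): assuming a populated struct tep_handle and a tep_record pulled from the ring buffer, tep_print_event() drives the three helpers above through a trace_seq:

#include "event-parse.h"
#include "trace-seq.h"

/* Sketch: print one record; 'tep' and 'rec' are assumed to be set up. */
static void print_record(struct tep_handle *tep, struct tep_record *rec)
{
	struct trace_seq s;

	trace_seq_init(&s);
	tep_print_event(tep, &s, rec, true /* use_trace_clock */);
	trace_seq_terminate(&s);
	trace_seq_do_printf(&s);	/* write the formatted line to stdout */
	trace_seq_destroy(&s);
}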
static int events_id_cmp(const void *a, const void *b)
@@ -5649,32 +5649,26 @@ static int events_system_cmp(const void *a, const void *b)
return events_id_cmp(a, b);
}
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
+static struct tep_event **list_events_copy(struct tep_handle *tep)
{
struct tep_event **events;
- int (*sort)(const void *a, const void *b);
-
- events = pevent->sort_events;
-
- if (events && pevent->last_type == sort_type)
- return events;
- if (!events) {
- events = malloc(sizeof(*events) * (pevent->nr_events + 1));
- if (!events)
- return NULL;
+ if (!tep)
+ return NULL;
- memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events);
- events[pevent->nr_events] = NULL;
+ events = malloc(sizeof(*events) * (tep->nr_events + 1));
+ if (!events)
+ return NULL;
- pevent->sort_events = events;
+ memcpy(events, tep->events, sizeof(*events) * tep->nr_events);
+ events[tep->nr_events] = NULL;
+ return events;
+}
- /* the internal events are sorted by id */
- if (sort_type == TEP_EVENT_SORT_ID) {
- pevent->last_type = sort_type;
- return events;
- }
- }
+static void list_events_sort(struct tep_event **events, int nr_events,
+ enum tep_event_sort_type sort_type)
+{
+ int (*sort)(const void *a, const void *b);
switch (sort_type) {
case TEP_EVENT_SORT_ID:
@@ -5687,11 +5681,82 @@ struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sor
sort = events_system_cmp;
break;
default:
+ sort = NULL;
+ }
+
+ if (sort)
+ qsort(events, nr_events, sizeof(*events), sort);
+}
+
+/**
+ * tep_list_events - Get events, sorted by given criteria.
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * memory must not be freed; it is managed by the library.
+ * This function is not thread-safe.
+ */
+struct tep_event **tep_list_events(struct tep_handle *tep,
+ enum tep_event_sort_type sort_type)
+{
+ struct tep_event **events;
+
+ if (!tep)
+ return NULL;
+
+ events = tep->sort_events;
+ if (events && tep->last_type == sort_type)
return events;
+
+ if (!events) {
+ events = list_events_copy(tep);
+ if (!events)
+ return NULL;
+
+ tep->sort_events = events;
+
+ /* the internal events are sorted by id */
+ if (sort_type == TEP_EVENT_SORT_ID) {
+ tep->last_type = sort_type;
+ return events;
+ }
}
- qsort(events, pevent->nr_events, sizeof(*events), sort);
- pevent->last_type = sort_type;
+ list_events_sort(events, tep->nr_events, sort_type);
+ tep->last_type = sort_type;
+
+ return events;
+}
+
+
+/**
+ * tep_list_events_copy - Thread safe version of tep_list_events()
+ * @tep: a handle to the tep context
+ * @sort_type: desired sort order of the events in the array
+ *
+ * Returns an array of pointers to all events, sorted by the given
+ * @sort_type criteria. The last element of the array is NULL. The returned
+ * array is newly allocated inside the function and must be freed by the caller.
+ */
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+ enum tep_event_sort_type sort_type)
+{
+ struct tep_event **events;
+
+ if (!tep)
+ return NULL;
+
+ events = list_events_copy(tep);
+ if (!events)
+ return NULL;
+
+ /* the internal events are sorted by id */
+ if (sort_type == TEP_EVENT_SORT_ID)
+ return events;
+
+ list_events_sort(events, tep->nr_events, sort_type);
return events;
}
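A short sketch contrasting the two list calls added above; the handle is assumed to already hold parsed events, and <stdio.h>/<stdlib.h> are assumed for printf()/free():

#include <stdio.h>
#include <stdlib.h>
#include "event-parse.h"

static void dump_event_names(struct tep_handle *tep)
{
	struct tep_event **events;
	int i;

	/* Shared, library-owned array: must not be freed. */
	events = tep_list_events(tep, TEP_EVENT_SORT_NAME);
	for (i = 0; events && events[i]; i++)
		printf("%s/%s\n", events[i]->system, events[i]->name);

	/* Thread-safe copy: the caller frees the array (not the events). */
	events = tep_list_events_copy(tep, TEP_EVENT_SORT_ID);
	if (!events)
		return;
	for (i = 0; events[i]; i++)
		printf("%d %s\n", events[i]->id, events[i]->name);
	free(events);
}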
@@ -5950,7 +6015,7 @@ static void parse_header_field(const char *field,
/**
* tep_parse_header_page - parse the data stored in the header page
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @buf: the buffer storing the header page format string
* @size: the size of @buf
* @long_size: the long size to use if there is no header
@@ -5960,7 +6025,7 @@ static void parse_header_field(const char *field,
*
* /sys/kernel/debug/tracing/events/header_page
*/
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
int long_size)
{
int ignore;
@@ -5970,22 +6035,22 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
* Old kernels did not have header page info.
* Sorry but we just use what we find here in user space.
*/
- pevent->header_page_ts_size = sizeof(long long);
- pevent->header_page_size_size = long_size;
- pevent->header_page_data_offset = sizeof(long long) + long_size;
- pevent->old_format = 1;
+ tep->header_page_ts_size = sizeof(long long);
+ tep->header_page_size_size = long_size;
+ tep->header_page_data_offset = sizeof(long long) + long_size;
+ tep->old_format = 1;
return -1;
}
init_input_buf(buf, size);
- parse_header_field("timestamp", &pevent->header_page_ts_offset,
- &pevent->header_page_ts_size, 1);
- parse_header_field("commit", &pevent->header_page_size_offset,
- &pevent->header_page_size_size, 1);
- parse_header_field("overwrite", &pevent->header_page_overwrite,
+ parse_header_field("timestamp", &tep->header_page_ts_offset,
+ &tep->header_page_ts_size, 1);
+ parse_header_field("commit", &tep->header_page_size_offset,
+ &tep->header_page_size_size, 1);
+ parse_header_field("overwrite", &tep->header_page_overwrite,
&ignore, 0);
- parse_header_field("data", &pevent->header_page_data_offset,
- &pevent->header_page_data_size, 1);
+ parse_header_field("data", &tep->header_page_data_offset,
+ &tep->header_page_data_size, 1);
return 0;
}
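A hedged sketch of feeding the header_page description to this parser; the debugfs path and the fixed buffer size are assumptions, real callers usually query the file size first:

#include <fcntl.h>
#include <unistd.h>
#include "event-parse.h"

static int load_header_page(struct tep_handle *tep)
{
	char buf[4096];
	ssize_t r;
	int fd;

	fd = open("/sys/kernel/debug/tracing/events/header_page", O_RDONLY);
	if (fd < 0)
		return -1;
	r = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (r <= 0)
		return -1;
	buf[r] = '\0';
	return tep_parse_header_page(tep, buf, r, sizeof(long));
}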
@@ -6013,11 +6078,11 @@ static void free_handler(struct event_handler *handle)
free(handle);
}
-static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
+static int find_event_handle(struct tep_handle *tep, struct tep_event *event)
{
struct event_handler *handle, **next;
- for (next = &pevent->handlers; *next;
+ for (next = &tep->handlers; *next;
next = &(*next)->next) {
handle = *next;
if (event_matches(event, handle->id,
@@ -6055,7 +6120,7 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
* /sys/kernel/debug/tracing/events/.../.../format
*/
enum tep_errno __tep_parse_format(struct tep_event **eventp,
- struct tep_handle *pevent, const char *buf,
+ struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys)
{
struct tep_event *event;
@@ -6097,8 +6162,8 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
goto event_alloc_failed;
}
- /* Add pevent to event so that it can be referenced */
- event->pevent = pevent;
+ /* Add tep to event so that it can be referenced */
+ event->tep = tep;
ret = event_read_format(event);
if (ret < 0) {
@@ -6110,7 +6175,7 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
* If the event has an override, don't print warnings if the event
* print format fails to parse.
*/
- if (pevent && find_event_handle(pevent, event))
+ if (tep && find_event_handle(tep, event))
show_warning = 0;
ret = event_read_print(event);
@@ -6162,18 +6227,18 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp,
}
static enum tep_errno
-__parse_event(struct tep_handle *pevent,
+__parse_event(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf, unsigned long size,
const char *sys)
{
- int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
+ int ret = __tep_parse_format(eventp, tep, buf, size, sys);
struct tep_event *event = *eventp;
if (event == NULL)
return ret;
- if (pevent && add_event(pevent, event)) {
+ if (tep && add_event(tep, event)) {
ret = TEP_ERRNO__MEM_ALLOC_FAILED;
goto event_add_failed;
}
@@ -6191,7 +6256,7 @@ event_add_failed:
/**
* tep_parse_format - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @eventp: returned format
* @buf: the buffer storing the event format string
* @size: the size of @buf
@@ -6204,17 +6269,17 @@ event_add_failed:
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys)
{
- return __parse_event(pevent, eventp, buf, size, sys);
+ return __parse_event(tep, eventp, buf, size, sys);
}
/**
* tep_parse_event - parse the event format
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @buf: the buffer storing the event format string
* @size: the size of @buf
* @sys: the system the event belongs to
@@ -6226,11 +6291,11 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys)
{
struct tep_event *event = NULL;
- return __parse_event(pevent, &event, buf, size, sys);
+ return __parse_event(tep, &event, buf, size, sys);
}
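A sketch of registering one event's format under the new names; buf/size are assumed to hold the contents of an events/<sys>/<event>/format file:

#include <stdio.h>
#include "event-parse.h"

static int register_format(struct tep_handle *tep, const char *sys,
			   char *buf, unsigned long size)
{
	enum tep_errno ret;

	ret = tep_parse_event(tep, buf, size, sys);
	if (ret != 0) {
		char err[256];

		tep_strerror(tep, ret, err, sizeof(err));
		fprintf(stderr, "parse failed: %s\n", err);
	}
	return ret;
}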
int get_field_val(struct trace_seq *s, struct tep_format_field *field,
@@ -6292,8 +6357,8 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
offset = field->offset;
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- offset = tep_read_number(event->pevent,
- data + offset, field->size);
+ offset = tep_read_number(event->tep,
+ data + offset, field->size);
*len = offset >> 16;
offset &= 0xffff;
} else
@@ -6386,7 +6451,8 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
* @record: The record with the field name.
* @err: print default error if failed.
*
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns a positive value on success, a negative value in case of an error,
+ * or 0 if the buffer is full.
*/
int tep_print_num_field(struct trace_seq *s, const char *fmt,
struct tep_event *event, const char *name,
@@ -6418,14 +6484,15 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
* @record: The record with the field name.
* @err: print default error if failed.
*
- * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
+ * Returns a positive value on success, a negative value in case of an error,
+ * or 0 if the buffer is full.
*/
int tep_print_func_field(struct trace_seq *s, const char *fmt,
struct tep_event *event, const char *name,
struct tep_record *record, int err)
{
struct tep_format_field *field = tep_find_field(event, name);
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long val;
struct func_map *func;
char tmp[128];
@@ -6436,7 +6503,7 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
if (tep_read_number_field(field, record->data, &val))
goto failed;
- func = find_func(pevent, val);
+ func = find_func(tep, val);
if (func)
snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val);
@@ -6468,7 +6535,7 @@ static void free_func_handle(struct tep_function_handler *func)
/**
* tep_register_print_function - register a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @func: the function to process the helper function
* @ret_type: the return type of the helper function
* @name: the name of the helper function
@@ -6481,7 +6548,7 @@ static void free_func_handle(struct tep_function_handler *func)
* The @parameters is a variable list of tep_func_arg_type enums that
* must end with TEP_FUNC_ARG_VOID.
*/
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
tep_func_handler func,
enum tep_func_arg_type ret_type,
char *name, ...)
@@ -6493,7 +6560,7 @@ int tep_register_print_function(struct tep_handle *pevent,
va_list ap;
int ret;
- func_handle = find_func_handler(pevent, name);
+ func_handle = find_func_handler(tep, name);
if (func_handle) {
/*
 * This is most likely caused by the user's own
@@ -6501,7 +6568,7 @@ int tep_register_print_function(struct tep_handle *pevent,
* system defaults.
*/
pr_stat("override of function helper '%s'", name);
- remove_func_handler(pevent, name);
+ remove_func_handler(tep, name);
}
func_handle = calloc(1, sizeof(*func_handle));
@@ -6548,8 +6615,8 @@ int tep_register_print_function(struct tep_handle *pevent,
}
va_end(ap);
- func_handle->next = pevent->func_handlers;
- pevent->func_handlers = func_handle;
+ func_handle->next = tep->func_handlers;
+ tep->func_handlers = func_handle;
return 0;
out_free:
@@ -6560,7 +6627,7 @@ int tep_register_print_function(struct tep_handle *pevent,
/**
* tep_unregister_print_function - unregister a helper function
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @func: the function to process the helper function
* @name: the name of the helper function
*
@@ -6568,20 +6635,20 @@ int tep_register_print_function(struct tep_handle *pevent,
*
 * Returns 0 if the handler was removed successfully, -1 otherwise.
*/
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
tep_func_handler func, char *name)
{
struct tep_function_handler *func_handle;
- func_handle = find_func_handler(pevent, name);
+ func_handle = find_func_handler(tep, name);
if (func_handle && func_handle->func == func) {
- remove_func_handler(pevent, name);
+ remove_func_handler(tep, name);
return 0;
}
return -1;
}
-static struct tep_event *search_event(struct tep_handle *pevent, int id,
+static struct tep_event *search_event(struct tep_handle *tep, int id,
const char *sys_name,
const char *event_name)
{
@@ -6589,7 +6656,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
if (id >= 0) {
/* search by id */
- event = tep_find_event(pevent, id);
+ event = tep_find_event(tep, id);
if (!event)
return NULL;
if (event_name && (strcmp(event_name, event->name) != 0))
@@ -6597,7 +6664,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
if (sys_name && (strcmp(sys_name, event->system) != 0))
return NULL;
} else {
- event = tep_find_event_by_name(pevent, sys_name, event_name);
+ event = tep_find_event_by_name(tep, sys_name, event_name);
if (!event)
return NULL;
}
@@ -6606,7 +6673,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
/**
* tep_register_event_handler - register a way to parse an event
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event to register
* @sys_name: the system name the event belongs to
* @event_name: the name of the event
@@ -6627,14 +6694,14 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
* negative TEP_ERRNO_... in case of an error
*
*/
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
struct tep_event *event;
struct event_handler *handle;
- event = search_event(pevent, id, sys_name, event_name);
+ event = search_event(tep, id, sys_name, event_name);
if (event == NULL)
goto not_found;
@@ -6669,8 +6736,8 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
}
handle->func = func;
- handle->next = pevent->handlers;
- pevent->handlers = handle;
+ handle->next = tep->handlers;
+ tep->handlers = handle;
handle->context = context;
return TEP_REGISTER_SUCCESS;
@@ -6697,7 +6764,7 @@ static int handle_matches(struct event_handler *handler, int id,
/**
* tep_unregister_event_handler - unregister an existing event handler
- * @pevent: the handle to the pevent
+ * @tep: a handle to the trace event parser context
* @id: the id of the event to unregister
* @sys_name: the system name the handler belongs to
* @event_name: the name of the event handler
@@ -6711,7 +6778,7 @@ static int handle_matches(struct event_handler *handler, int id,
*
* Returns 0 if handler was removed successfully, -1 if event was not found.
*/
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
@@ -6719,7 +6786,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
struct event_handler *handle;
struct event_handler **next;
- event = search_event(pevent, id, sys_name, event_name);
+ event = search_event(tep, id, sys_name, event_name);
if (event == NULL)
goto not_found;
@@ -6733,7 +6800,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
}
not_found:
- for (next = &pevent->handlers; *next; next = &(*next)->next) {
+ for (next = &tep->handlers; *next; next = &(*next)->next) {
handle = *next;
if (handle_matches(handle, id, sys_name, event_name,
func, context))
@@ -6750,23 +6817,23 @@ not_found:
}
/**
- * tep_alloc - create a pevent handle
+ * tep_alloc - create a tep handle
*/
struct tep_handle *tep_alloc(void)
{
- struct tep_handle *pevent = calloc(1, sizeof(*pevent));
+ struct tep_handle *tep = calloc(1, sizeof(*tep));
- if (pevent) {
- pevent->ref_count = 1;
- pevent->host_bigendian = tep_host_bigendian();
+ if (tep) {
+ tep->ref_count = 1;
+ tep->host_bigendian = tep_is_bigendian();
}
- return pevent;
+ return tep;
}
-void tep_ref(struct tep_handle *pevent)
+void tep_ref(struct tep_handle *tep)
{
- pevent->ref_count++;
+ tep->ref_count++;
}
int tep_get_ref(struct tep_handle *tep)
@@ -6816,10 +6883,10 @@ void tep_free_event(struct tep_event *event)
}
/**
- * tep_free - free a pevent handle
- * @pevent: the pevent handle to free
+ * tep_free - free a tep handle
+ * @tep: the tep handle to free
*/
-void tep_free(struct tep_handle *pevent)
+void tep_free(struct tep_handle *tep)
{
struct cmdline_list *cmdlist, *cmdnext;
struct func_list *funclist, *funcnext;
@@ -6828,21 +6895,21 @@ void tep_free(struct tep_handle *pevent)
struct event_handler *handle;
int i;
- if (!pevent)
+ if (!tep)
return;
- cmdlist = pevent->cmdlist;
- funclist = pevent->funclist;
- printklist = pevent->printklist;
+ cmdlist = tep->cmdlist;
+ funclist = tep->funclist;
+ printklist = tep->printklist;
- pevent->ref_count--;
- if (pevent->ref_count)
+ tep->ref_count--;
+ if (tep->ref_count)
return;
- if (pevent->cmdlines) {
- for (i = 0; i < pevent->cmdline_count; i++)
- free(pevent->cmdlines[i].comm);
- free(pevent->cmdlines);
+ if (tep->cmdlines) {
+ for (i = 0; i < tep->cmdline_count; i++)
+ free(tep->cmdlines[i].comm);
+ free(tep->cmdlines);
}
while (cmdlist) {
@@ -6852,12 +6919,12 @@ void tep_free(struct tep_handle *pevent)
cmdlist = cmdnext;
}
- if (pevent->func_map) {
- for (i = 0; i < (int)pevent->func_count; i++) {
- free(pevent->func_map[i].func);
- free(pevent->func_map[i].mod);
+ if (tep->func_map) {
+ for (i = 0; i < (int)tep->func_count; i++) {
+ free(tep->func_map[i].func);
+ free(tep->func_map[i].mod);
}
- free(pevent->func_map);
+ free(tep->func_map);
}
while (funclist) {
@@ -6868,16 +6935,16 @@ void tep_free(struct tep_handle *pevent)
funclist = funcnext;
}
- while (pevent->func_handlers) {
- func_handler = pevent->func_handlers;
- pevent->func_handlers = func_handler->next;
+ while (tep->func_handlers) {
+ func_handler = tep->func_handlers;
+ tep->func_handlers = func_handler->next;
free_func_handle(func_handler);
}
- if (pevent->printk_map) {
- for (i = 0; i < (int)pevent->printk_count; i++)
- free(pevent->printk_map[i].printk);
- free(pevent->printk_map);
+ if (tep->printk_map) {
+ for (i = 0; i < (int)tep->printk_count; i++)
+ free(tep->printk_map[i].printk);
+ free(tep->printk_map);
}
while (printklist) {
@@ -6887,24 +6954,24 @@ void tep_free(struct tep_handle *pevent)
printklist = printknext;
}
- for (i = 0; i < pevent->nr_events; i++)
- tep_free_event(pevent->events[i]);
+ for (i = 0; i < tep->nr_events; i++)
+ tep_free_event(tep->events[i]);
- while (pevent->handlers) {
- handle = pevent->handlers;
- pevent->handlers = handle->next;
+ while (tep->handlers) {
+ handle = tep->handlers;
+ tep->handlers = handle->next;
free_handler(handle);
}
- free(pevent->trace_clock);
- free(pevent->events);
- free(pevent->sort_events);
- free(pevent->func_resolver);
+ free(tep->trace_clock);
+ free(tep->events);
+ free(tep->sort_events);
+ free(tep->func_resolver);
- free(pevent);
+ free(tep);
}
-void tep_unref(struct tep_handle *pevent)
+void tep_unref(struct tep_handle *tep)
{
- tep_free(pevent);
+ tep_free(tep);
}
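A small lifetime sketch under the renamed handle API, following the reference-count behaviour shown above:

#include "event-parse.h"

static void lifetime_sketch(void)
{
	struct tep_handle *tep = tep_alloc();	/* ref_count starts at 1 */

	if (!tep)
		return;
	tep_ref(tep);		/* a second user takes a reference */
	tep_unref(tep);		/* back to 1; the handle stays alive */
	tep_free(tep);		/* drops to 0; all parser data is released */
}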
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index aec48f2aea8a..642f68ab5fb2 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -64,8 +64,8 @@ typedef int (*tep_event_handler_func)(struct trace_seq *s,
struct tep_event *event,
void *context);
-typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
-typedef int (*tep_plugin_unload_func)(struct tep_handle *pevent);
+typedef int (*tep_plugin_load_func)(struct tep_handle *tep);
+typedef int (*tep_plugin_unload_func)(struct tep_handle *tep);
struct tep_plugin_option {
struct tep_plugin_option *next;
@@ -85,12 +85,12 @@ struct tep_plugin_option {
* TEP_PLUGIN_LOADER: (required)
 * The function name to initialize the plugin.
*
- * int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+ * int TEP_PLUGIN_LOADER(struct tep_handle *tep)
*
* TEP_PLUGIN_UNLOADER: (optional)
* The function called just before unloading
*
- * int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+ * int TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
*
* TEP_PLUGIN_OPTIONS: (optional)
* Plugin options that can be set before loading
@@ -278,7 +278,7 @@ struct tep_print_fmt {
};
struct tep_event {
- struct tep_handle *pevent;
+ struct tep_handle *tep;
char *name;
int id;
int flags;
@@ -393,9 +393,9 @@ struct tep_plugin_list;
#define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1))
-struct tep_plugin_list *tep_load_plugins(struct tep_handle *pevent);
+struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep);
void tep_unload_plugins(struct tep_plugin_list *plugin_list,
- struct tep_handle *pevent);
+ struct tep_handle *tep);
char **tep_plugin_list_options(void);
void tep_plugin_free_options_list(char **list);
int tep_plugin_add_options(const char *name,
@@ -409,8 +409,10 @@ void tep_print_plugins(struct trace_seq *s,
typedef char *(tep_func_resolver_t)(void *priv,
unsigned long long *addrp, char **modp);
void tep_set_flag(struct tep_handle *tep, int flag);
+void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag);
+bool tep_test_flag(struct tep_handle *tep, enum tep_flag flags);
-static inline int tep_host_bigendian(void)
+static inline int tep_is_bigendian(void)
{
unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
unsigned int val;
@@ -428,37 +430,37 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
};
-int tep_set_function_resolver(struct tep_handle *pevent,
+int tep_set_function_resolver(struct tep_handle *tep,
tep_func_resolver_t *func, void *priv);
-void tep_reset_function_resolver(struct tep_handle *pevent);
-int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
-int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
-int tep_register_function(struct tep_handle *pevent, char *name,
+void tep_reset_function_resolver(struct tep_handle *tep);
+int tep_register_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_override_comm(struct tep_handle *tep, const char *comm, int pid);
+int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock);
+int tep_register_function(struct tep_handle *tep, char *name,
unsigned long long addr, char *mod);
-int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+int tep_register_print_string(struct tep_handle *tep, const char *fmt,
unsigned long long addr);
-int tep_pid_is_registered(struct tep_handle *pevent, int pid);
+bool tep_is_pid_registered(struct tep_handle *tep, int pid);
-void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record);
-void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record,
bool use_trace_clock);
-void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s,
struct tep_event *event,
struct tep_record *record);
-void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock);
-int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
int long_size);
-enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
unsigned long size, const char *sys);
-enum tep_errno tep_parse_format(struct tep_handle *pevent,
+enum tep_errno tep_parse_format(struct tep_handle *tep,
struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys);
@@ -490,50 +492,50 @@ enum tep_reg_handler {
TEP_REGISTER_SUCCESS_OVERWRITE,
};
-int tep_register_event_handler(struct tep_handle *pevent, int id,
+int tep_register_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
-int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+int tep_unregister_event_handler(struct tep_handle *tep, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context);
-int tep_register_print_function(struct tep_handle *pevent,
+int tep_register_print_function(struct tep_handle *tep,
tep_func_handler func,
enum tep_func_arg_type ret_type,
char *name, ...);
-int tep_unregister_print_function(struct tep_handle *pevent,
+int tep_unregister_print_function(struct tep_handle *tep,
tep_func_handler func, char *name);
struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
-const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
+const char *tep_find_function(struct tep_handle *tep, unsigned long long addr);
unsigned long long
-tep_find_function_address(struct tep_handle *pevent, unsigned long long addr);
-unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size);
+tep_find_function_address(struct tep_handle *tep, unsigned long long addr);
+unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size);
int tep_read_number_field(struct tep_format_field *field, const void *data,
unsigned long long *value);
struct tep_event *tep_get_first_event(struct tep_handle *tep);
int tep_get_events_count(struct tep_handle *tep);
-struct tep_event *tep_find_event(struct tep_handle *pevent, int id);
+struct tep_event *tep_find_event(struct tep_handle *tep, int id);
struct tep_event *
-tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
+tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name);
struct tep_event *
-tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
-
-void tep_data_lat_fmt(struct tep_handle *pevent,
- struct trace_seq *s, struct tep_record *record);
-int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
-int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
-const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
+tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record);
+
+void tep_data_latency_format(struct tep_handle *tep,
+ struct trace_seq *s, struct tep_record *record);
+int tep_data_type(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_pid(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec);
+int tep_data_flags(struct tep_handle *tep, struct tep_record *rec);
+const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid);
struct tep_cmdline;
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
struct tep_cmdline *next);
-int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
+int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
@@ -541,10 +543,12 @@ void tep_print_fields(struct trace_seq *s, void *data,
int size __maybe_unused, struct tep_event *event);
void tep_event_info(struct trace_seq *s, struct tep_event *event,
struct tep_record *record);
-int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
+int tep_strerror(struct tep_handle *tep, enum tep_errno errnum,
char *buf, size_t buflen);
-struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
+struct tep_event **tep_list_events(struct tep_handle *tep, enum tep_event_sort_type);
+struct tep_event **tep_list_events_copy(struct tep_handle *tep,
+ enum tep_event_sort_type);
struct tep_format_field **tep_event_common_fields(struct tep_event *event);
struct tep_format_field **tep_event_fields(struct tep_event *event);
@@ -552,24 +556,28 @@ enum tep_endian {
TEP_LITTLE_ENDIAN = 0,
TEP_BIG_ENDIAN
};
-int tep_get_cpus(struct tep_handle *pevent);
-void tep_set_cpus(struct tep_handle *pevent, int cpus);
-int tep_get_long_size(struct tep_handle *pevent);
-void tep_set_long_size(struct tep_handle *pevent, int long_size);
-int tep_get_page_size(struct tep_handle *pevent);
-void tep_set_page_size(struct tep_handle *pevent, int _page_size);
-int tep_file_bigendian(struct tep_handle *pevent);
-void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_host_bigendian(struct tep_handle *pevent);
-void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
-int tep_is_latency_format(struct tep_handle *pevent);
-void tep_set_latency_format(struct tep_handle *pevent, int lat);
-int tep_get_header_page_size(struct tep_handle *pevent);
+int tep_get_cpus(struct tep_handle *tep);
+void tep_set_cpus(struct tep_handle *tep, int cpus);
+int tep_get_long_size(struct tep_handle *tep);
+void tep_set_long_size(struct tep_handle *tep, int long_size);
+int tep_get_page_size(struct tep_handle *tep);
+void tep_set_page_size(struct tep_handle *tep, int _page_size);
+bool tep_is_file_bigendian(struct tep_handle *tep);
+void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_local_bigendian(struct tep_handle *tep);
+void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian);
+bool tep_is_latency_format(struct tep_handle *tep);
+void tep_set_latency_format(struct tep_handle *tep, int lat);
+int tep_get_header_page_size(struct tep_handle *tep);
+int tep_get_header_timestamp_size(struct tep_handle *tep);
+bool tep_is_old_format(struct tep_handle *tep);
+void tep_set_print_raw(struct tep_handle *tep, int print_raw);
+void tep_set_test_filters(struct tep_handle *tep, int test_filters);
struct tep_handle *tep_alloc(void);
-void tep_free(struct tep_handle *pevent);
-void tep_ref(struct tep_handle *pevent);
-void tep_unref(struct tep_handle *pevent);
+void tep_free(struct tep_handle *tep);
+void tep_ref(struct tep_handle *tep);
+void tep_unref(struct tep_handle *tep);
int tep_get_ref(struct tep_handle *tep);
/* access to the internal parser */
@@ -581,8 +589,8 @@ const char *tep_get_input_buf(void);
unsigned long long tep_get_input_buf_ptr(void);
/* for debugging */
-void tep_print_funcs(struct tep_handle *pevent);
-void tep_print_printk(struct tep_handle *pevent);
+void tep_print_funcs(struct tep_handle *tep);
+void tep_print_printk(struct tep_handle *tep);
/* ----------------------- filtering ----------------------- */
@@ -709,13 +717,13 @@ struct tep_filter_type {
#define TEP_FILTER_ERROR_BUFSZ 1024
struct tep_event_filter {
- struct tep_handle *pevent;
+ struct tep_handle *tep;
int filters;
struct tep_filter_type *event_filters;
char error_buffer[TEP_FILTER_ERROR_BUFSZ];
};
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep);
/* for backward compatibility */
#define FILTER_NONE TEP_ERRNO__NO_FILTER
@@ -723,12 +731,6 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent);
#define FILTER_MISS TEP_ERRNO__FILTER_MISS
#define FILTER_MATCH TEP_ERRNO__FILTER_MATCH
-enum tep_filter_trivial_type {
- TEP_FILTER_TRIVIAL_FALSE,
- TEP_FILTER_TRIVIAL_TRUE,
- TEP_FILTER_TRIVIAL_BOTH,
-};
-
enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str);
@@ -743,9 +745,6 @@ int tep_event_filtered(struct tep_event_filter *filter,
void tep_filter_reset(struct tep_event_filter *filter);
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
- enum tep_filter_trivial_type type);
-
void tep_filter_free(struct tep_event_filter *filter);
char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
@@ -753,15 +752,8 @@ char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
int tep_filter_remove_event(struct tep_event_filter *filter,
int event_id);
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
- int event_id,
- enum tep_filter_trivial_type type);
-
int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source);
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
- enum tep_filter_trivial_type type);
-
int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2);
#endif /* _PARSE_EVENTS_H */
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index e74f16c88398..8ca28de9337a 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -269,7 +269,7 @@ void tep_print_plugins(struct trace_seq *s,
}
static void
-load_plugin(struct tep_handle *pevent, const char *path,
+load_plugin(struct tep_handle *tep, const char *path,
const char *file, void *data)
{
struct tep_plugin_list **plugin_list = data;
@@ -316,7 +316,7 @@ load_plugin(struct tep_handle *pevent, const char *path,
*plugin_list = list;
pr_stat("registering plugin: %s", plugin);
- func(pevent);
+ func(tep);
return;
out_free:
@@ -324,9 +324,9 @@ load_plugin(struct tep_handle *pevent, const char *path,
}
static void
-load_plugins_dir(struct tep_handle *pevent, const char *suffix,
+load_plugins_dir(struct tep_handle *tep, const char *suffix,
const char *path,
- void (*load_plugin)(struct tep_handle *pevent,
+ void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
@@ -359,15 +359,15 @@ load_plugins_dir(struct tep_handle *pevent, const char *suffix,
if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
continue;
- load_plugin(pevent, path, name, data);
+ load_plugin(tep, path, name, data);
}
closedir(dir);
}
static void
-load_plugins(struct tep_handle *pevent, const char *suffix,
- void (*load_plugin)(struct tep_handle *pevent,
+load_plugins(struct tep_handle *tep, const char *suffix,
+ void (*load_plugin)(struct tep_handle *tep,
const char *path,
const char *name,
void *data),
@@ -378,7 +378,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
char *envdir;
int ret;
- if (pevent->flags & TEP_DISABLE_PLUGINS)
+ if (tep->flags & TEP_DISABLE_PLUGINS)
return;
/*
@@ -386,8 +386,8 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
* check that first.
*/
#ifdef PLUGIN_DIR
- if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS))
- load_plugins_dir(pevent, suffix, PLUGIN_DIR,
+ if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
+ load_plugins_dir(tep, suffix, PLUGIN_DIR,
load_plugin, data);
#endif
@@ -397,7 +397,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
*/
envdir = getenv("TRACEEVENT_PLUGIN_DIR");
if (envdir)
- load_plugins_dir(pevent, suffix, envdir, load_plugin, data);
+ load_plugins_dir(tep, suffix, envdir, load_plugin, data);
/*
* Now let the home directory override the environment
@@ -413,22 +413,22 @@ load_plugins(struct tep_handle *pevent, const char *suffix,
return;
}
- load_plugins_dir(pevent, suffix, path, load_plugin, data);
+ load_plugins_dir(tep, suffix, path, load_plugin, data);
free(path);
}
struct tep_plugin_list*
-tep_load_plugins(struct tep_handle *pevent)
+tep_load_plugins(struct tep_handle *tep)
{
struct tep_plugin_list *list = NULL;
- load_plugins(pevent, ".so", load_plugin, &list);
+ load_plugins(tep, ".so", load_plugin, &list);
return list;
}
void
-tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent)
+tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
{
tep_plugin_unload_func func;
struct tep_plugin_list *list;
@@ -438,7 +438,7 @@ tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *peven
plugin_list = list->next;
func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
if (func)
- func(pevent);
+ func(tep);
dlclose(list->handle);
free(list->name);
free(list);
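A sketch pairing the renamed plugin entry points with a handle's lifetime; error handling is trimmed:

#include "event-parse.h"

static void with_plugins(void)
{
	struct tep_handle *tep = tep_alloc();
	struct tep_plugin_list *plugins;

	if (!tep)
		return;
	plugins = tep_load_plugins(tep);	/* runs each TEP_PLUGIN_LOADER(tep) */
	/* ... parse formats, print events ... */
	tep_unload_plugins(plugins, tep);	/* runs each TEP_PLUGIN_UNLOADER(tep) */
	tep_free(tep);
}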
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index af2a1f3b7424..b887e7437d67 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -727,3 +727,52 @@ int kbuffer_start_of_data(struct kbuffer *kbuf)
{
return kbuf->start;
}
+
+/**
+ * kbuffer_raw_get - get raw buffer info
+ * @kbuf: The kbuffer
+ * @subbuf: Start of mapped subbuffer
+ * @info: Info descriptor to fill in
+ *
+ * For debugging. This can return internals of the ring buffer.
+ * Expects to have info->next set to what it will read.
+ * The type, length and timestamp delta will be filled in, and
+ * @info->next will be updated to the next element.
+ * The @subbuf is used to know if the info is past the end of
+ * data, and NULL will be returned if it is.
+ */
+struct kbuffer_raw_info *
+kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
+{
+ unsigned long long flags;
+ unsigned long long delta;
+ unsigned int type_len;
+ unsigned int size;
+ int start;
+ int length;
+ void *ptr = info->next;
+
+ if (!kbuf || !subbuf)
+ return NULL;
+
+ if (kbuf->flags & KBUFFER_FL_LONG_8)
+ start = 16;
+ else
+ start = 12;
+
+ flags = read_long(kbuf, subbuf + 8);
+ size = (unsigned int)flags & COMMIT_MASK;
+
+ if (ptr < subbuf || ptr >= subbuf + start + size)
+ return NULL;
+
+ type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
+
+ info->next = ptr + length;
+
+ info->type = type_len;
+ info->delta = delta;
+ info->length = length;
+
+ return info;
+}
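A usage sketch for the new kbuffer_raw_get(); it assumes kbuffer_load_subbuffer() has already mapped subbuf into kbuf, and <stdio.h> for printf():

#include <stdio.h>
#include "kbuffer.h"

static void dump_raw(struct kbuffer *kbuf, void *subbuf)
{
	struct kbuffer_raw_info info;

	/* Start at the first element of the sub-buffer's data section. */
	info.next = (char *)subbuf + kbuffer_start_of_data(kbuf);
	while (kbuffer_raw_get(kbuf, subbuf, &info))
		printf("type %d len %d delta %llu\n",
		       info.type, info.length, info.delta);
}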
diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h
index 03dce757553f..ed4d697fc137 100644
--- a/tools/lib/traceevent/kbuffer.h
+++ b/tools/lib/traceevent/kbuffer.h
@@ -65,4 +65,17 @@ int kbuffer_subbuffer_size(struct kbuffer *kbuf);
void kbuffer_set_old_format(struct kbuffer *kbuf);
int kbuffer_start_of_data(struct kbuffer *kbuf);
+/* Debugging */
+
+struct kbuffer_raw_info {
+ int type;
+ int length;
+ unsigned long long delta;
+ void *next;
+};
+
+/* Read raw data */
+struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf,
+ struct kbuffer_raw_info *info);
+
#endif /* _K_BUFFER_H */
diff --git a/tools/lib/traceevent/libtraceevent.pc.template b/tools/lib/traceevent/libtraceevent.pc.template
index 42e4d6cb6b9e..86384fcd57f1 100644
--- a/tools/lib/traceevent/libtraceevent.pc.template
+++ b/tools/lib/traceevent/libtraceevent.pc.template
@@ -1,6 +1,6 @@
prefix=INSTALL_PREFIX
-libdir=${prefix}/lib64
-includedir=${prefix}/include/traceevent
+libdir=LIB_DIR
+includedir=HEADER_DIR
Name: libtraceevent
URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index cb5ce66dab6e..552592d153fb 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -154,7 +154,7 @@ add_filter_type(struct tep_event_filter *filter, int id)
filter_type = &filter->event_filters[i];
filter_type->event_id = id;
- filter_type->event = tep_find_event(filter->pevent, id);
+ filter_type->event = tep_find_event(filter->tep, id);
filter_type->filter = NULL;
filter->filters++;
@@ -164,9 +164,9 @@ add_filter_type(struct tep_event_filter *filter, int id)
/**
* tep_filter_alloc - create a new event filter
- * @pevent: The pevent that this filter is associated with
+ * @tep: The tep that this filter is associated with
*/
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
+struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep)
{
struct tep_event_filter *filter;
@@ -175,8 +175,8 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent)
return NULL;
memset(filter, 0, sizeof(*filter));
- filter->pevent = pevent;
- tep_ref(pevent);
+ filter->tep = tep;
+ tep_ref(tep);
return filter;
}
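A sketch of the renamed filter API; the filter-string syntax ("sys/event: expression") and the sched_switch field used here are assumptions, not taken from this patch:

#include "event-parse.h"

static int matches(struct tep_handle *tep, struct tep_record *rec)
{
	struct tep_event_filter *filter;
	enum tep_errno ret;

	filter = tep_filter_alloc(tep);		/* takes a reference on tep */
	if (!filter)
		return -1;

	ret = tep_filter_add_filter_str(filter, "sched/sched_switch: next_pid > 0");
	if (ret < 0) {
		tep_filter_free(filter);
		return -1;
	}

	ret = tep_filter_match(filter, rec);
	tep_filter_free(filter);		/* drops the tep reference */

	return ret == TEP_ERRNO__FILTER_MATCH;
}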
@@ -256,7 +256,7 @@ static int event_match(struct tep_event *event,
}
static enum tep_errno
-find_event(struct tep_handle *pevent, struct event_list **events,
+find_event(struct tep_handle *tep, struct event_list **events,
char *sys_name, char *event_name)
{
struct tep_event *event;
@@ -299,8 +299,8 @@ find_event(struct tep_handle *pevent, struct event_list **events,
}
}
- for (i = 0; i < pevent->nr_events; i++) {
- event = pevent->events[i];
+ for (i = 0; i < tep->nr_events; i++) {
+ event = tep->events[i];
if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
match = 1;
if (add_event(events, event) < 0) {
@@ -1257,7 +1257,7 @@ static void filter_init_error_buf(struct tep_event_filter *filter)
enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
const char *filter_str)
{
- struct tep_handle *pevent = filter->pevent;
+ struct tep_handle *tep = filter->tep;
struct event_list *event;
struct event_list *events = NULL;
const char *filter_start;
@@ -1313,7 +1313,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
}
/* Find this event */
- ret = find_event(pevent, &events, strim(sys_name), strim(event_name));
+ ret = find_event(tep, &events, strim(sys_name), strim(event_name));
if (ret < 0) {
free_events(events);
free(this_event);
@@ -1334,7 +1334,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
if (ret < 0)
rtn = ret;
- if (ret >= 0 && pevent->test_filters) {
+ if (ret >= 0 && tep->test_filters) {
char *test;
test = tep_filter_make_string(filter, event->event->id);
if (test) {
@@ -1346,9 +1346,6 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
free_events(events);
- if (rtn >= 0 && pevent->test_filters)
- exit(0);
-
return rtn;
}
@@ -1380,7 +1377,7 @@ int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
return 0;
}
- return tep_strerror(filter->pevent, err, buf, buflen);
+ return tep_strerror(filter->tep, err, buf, buflen);
}
/**
@@ -1443,7 +1440,7 @@ void tep_filter_reset(struct tep_event_filter *filter)
void tep_filter_free(struct tep_event_filter *filter)
{
- tep_unref(filter->pevent);
+ tep_unref(filter->tep);
tep_filter_reset(filter);
@@ -1462,10 +1459,10 @@ static int copy_filter_type(struct tep_event_filter *filter,
const char *name;
char *str;
- /* Can't assume that the pevent's are the same */
+ /* Can't assume that the tep handles are the same */
sys = filter_type->event->system;
name = filter_type->event->name;
- event = tep_find_event_by_name(filter->pevent, sys, name);
+ event = tep_find_event_by_name(filter->tep, sys, name);
if (!event)
return -1;
@@ -1522,167 +1519,6 @@ int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *sour
return ret;
}
-
-/**
- * tep_update_trivial - update the trivial filters with the given filter
- * @dest - the filter to update
- * @source - the filter as the source of the update
- * @type - the type of trivial filter to update.
- *
- * Scan dest for trivial events matching @type to replace with the source.
- *
- * Returns 0 on success and -1 if there was a problem updating, but
- * events may have still been updated on error.
- */
-int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source,
- enum tep_filter_trivial_type type)
-{
- struct tep_handle *src_pevent;
- struct tep_handle *dest_pevent;
- struct tep_event *event;
- struct tep_filter_type *filter_type;
- struct tep_filter_arg *arg;
- char *str;
- int i;
-
- src_pevent = source->pevent;
- dest_pevent = dest->pevent;
-
- /* Do nothing if either of the filters has nothing to filter */
- if (!dest->filters || !source->filters)
- return 0;
-
- for (i = 0; i < dest->filters; i++) {
- filter_type = &dest->event_filters[i];
- arg = filter_type->filter;
- if (arg->type != TEP_FILTER_ARG_BOOLEAN)
- continue;
- if ((arg->boolean.value && type == TEP_FILTER_TRIVIAL_FALSE) ||
- (!arg->boolean.value && type == TEP_FILTER_TRIVIAL_TRUE))
- continue;
-
- event = filter_type->event;
-
- if (src_pevent != dest_pevent) {
- /* do a look up */
- event = tep_find_event_by_name(src_pevent,
- event->system,
- event->name);
- if (!event)
- return -1;
- }
-
- str = tep_filter_make_string(source, event->id);
- if (!str)
- continue;
-
- /* Don't bother if the filter is trivial too */
- if (strcmp(str, "TRUE") != 0 && strcmp(str, "FALSE") != 0)
- filter_event(dest, event, str, NULL);
- free(str);
- }
- return 0;
-}
-
-/**
- * tep_filter_clear_trivial - clear TRUE and FALSE filters
- * @filter: the filter to remove trivial filters from
- * @type: remove only true, false, or both
- *
- * Removes filters that only contain a TRUE or FALES boolean arg.
- *
- * Returns 0 on success and -1 if there was a problem.
- */
-int tep_filter_clear_trivial(struct tep_event_filter *filter,
- enum tep_filter_trivial_type type)
-{
- struct tep_filter_type *filter_type;
- int count = 0;
- int *ids = NULL;
- int i;
-
- if (!filter->filters)
- return 0;
-
- /*
- * Two steps, first get all ids with trivial filters.
- * then remove those ids.
- */
- for (i = 0; i < filter->filters; i++) {
- int *new_ids;
-
- filter_type = &filter->event_filters[i];
- if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
- continue;
- switch (type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- if (filter_type->filter->boolean.value)
- continue;
- break;
- case TEP_FILTER_TRIVIAL_TRUE:
- if (!filter_type->filter->boolean.value)
- continue;
- default:
- break;
- }
-
- new_ids = realloc(ids, sizeof(*ids) * (count + 1));
- if (!new_ids) {
- free(ids);
- return -1;
- }
-
- ids = new_ids;
- ids[count++] = filter_type->event_id;
- }
-
- if (!count)
- return 0;
-
- for (i = 0; i < count; i++)
- tep_filter_remove_event(filter, ids[i]);
-
- free(ids);
- return 0;
-}
-
-/**
- * tep_filter_event_has_trivial - return true event contains trivial filter
- * @filter: the filter with the information
- * @event_id: the id of the event to test
- * @type: trivial type to test for (TRUE, FALSE, EITHER)
- *
- * Returns 1 if the event contains a matching trivial type
- * otherwise 0.
- */
-int tep_filter_event_has_trivial(struct tep_event_filter *filter,
- int event_id,
- enum tep_filter_trivial_type type)
-{
- struct tep_filter_type *filter_type;
-
- if (!filter->filters)
- return 0;
-
- filter_type = find_filter_type(filter, event_id);
-
- if (!filter_type)
- return 0;
-
- if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN)
- return 0;
-
- switch (type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- return !filter_type->filter->boolean.value;
-
- case TEP_FILTER_TRIVIAL_TRUE:
- return filter_type->filter->boolean.value;
- default:
- return 1;
- }
-}
-
static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
@@ -1692,8 +1528,8 @@ get_comm(struct tep_event *event, struct tep_record *record)
const char *comm;
int pid;
- pid = tep_data_pid(event->pevent, record);
- comm = tep_data_comm_from_pid(event->pevent, pid);
+ pid = tep_data_pid(event->tep, record);
+ comm = tep_data_comm_from_pid(event->tep, pid);
return comm;
}
@@ -1861,7 +1697,7 @@ static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
{
struct tep_event *event;
- struct tep_handle *pevent;
+ struct tep_handle *tep;
unsigned long long addr;
const char *val = NULL;
unsigned int size;
@@ -1891,12 +1727,12 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *
} else {
event = arg->str.field->event;
- pevent = event->pevent;
+ tep = event->tep;
addr = get_value(event, arg->str.field, record);
if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG))
/* convert to a kernel symbol */
- val = tep_find_function(pevent, addr);
+ val = tep_find_function(tep, addr);
if (val == NULL) {
/* just use the hex of the string name */
@@ -2036,7 +1872,7 @@ int tep_event_filtered(struct tep_event_filter *filter, int event_id)
enum tep_errno tep_filter_match(struct tep_event_filter *filter,
struct tep_record *record)
{
- struct tep_handle *pevent = filter->pevent;
+ struct tep_handle *tep = filter->tep;
struct tep_filter_type *filter_type;
int event_id;
int ret;
@@ -2047,7 +1883,7 @@ enum tep_errno tep_filter_match(struct tep_event_filter *filter,
if (!filter->filters)
return TEP_ERRNO__NO_FILTER;
- event_id = tep_data_type(pevent, record);
+ event_id = tep_data_type(tep, record);
filter_type = find_filter_type(filter, event_id);
if (!filter_type)
@@ -2409,14 +2245,6 @@ int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter
break;
if (filter_type1->filter->type != filter_type2->filter->type)
break;
- switch (filter_type1->filter->type) {
- case TEP_FILTER_TRIVIAL_FALSE:
- case TEP_FILTER_TRIVIAL_TRUE:
- /* trivial types just need the type compared */
- continue;
- default:
- break;
- }
/* The best way to compare complex filters is with strings */
str1 = arg_to_str(filter1, filter_type1->filter);
str2 = arg_to_str(filter2, filter_type2->filter);
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
index 77e4ec6402dd..e99867111387 100644
--- a/tools/lib/traceevent/parse-utils.c
+++ b/tools/lib/traceevent/parse-utils.c
@@ -14,7 +14,7 @@
void __vwarning(const char *fmt, va_list ap)
{
if (errno)
- perror("trace-cmd");
+ perror("libtraceevent");
errno = 0;
fprintf(stderr, " ");
diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c
index a51b366f47da..3d43b56a6c98 100644
--- a/tools/lib/traceevent/plugin_cfg80211.c
+++ b/tools/lib/traceevent/plugin_cfg80211.c
@@ -25,9 +25,9 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
return val ? (long long) le16toh(*val) : 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process___le16_to_cpup,
TEP_FUNC_ARG_INT,
"__le16_to_cpup",
@@ -36,8 +36,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process___le16_to_cpup,
+ tep_unregister_print_function(tep, process___le16_to_cpup,
"__le16_to_cpup");
}
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
index a73eca34a8f9..7770fcb78e0f 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugin_function.c
@@ -126,7 +126,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
static int function_handler(struct trace_seq *s, struct tep_record *record,
struct tep_event *event, void *context)
{
- struct tep_handle *pevent = event->pevent;
+ struct tep_handle *tep = event->tep;
unsigned long long function;
unsigned long long pfunction;
const char *func;
@@ -136,12 +136,12 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
if (tep_get_field_val(s, event, "ip", record, &function, 1))
return trace_seq_putc(s, '!');
- func = tep_find_function(pevent, function);
+ func = tep_find_function(tep, function);
if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
return trace_seq_putc(s, '!');
- parent = tep_find_function(pevent, pfunction);
+ parent = tep_find_function(tep, pfunction);
if (parent && ftrace_indent->set)
index = add_and_get_index(parent, func, record->cpu);
@@ -164,9 +164,9 @@ static int function_handler(struct trace_seq *s, struct tep_record *record,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "ftrace", "function",
+ tep_register_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
tep_plugin_add_options("ftrace", plugin_options);
@@ -174,11 +174,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
int i, x;
- tep_unregister_event_handler(pevent, -1, "ftrace", "function",
+ tep_unregister_event_handler(tep, -1, "ftrace", "function",
function_handler, NULL);
for (i = 0; i <= cpus; i++) {
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
index 5db5e401275f..bb434e0ed03a 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -67,23 +67,23 @@ static int timer_start_handler(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1,
+ tep_register_event_handler(tep, -1,
"timer", "hrtimer_expire_entry",
timer_expire_handler, NULL);
- tep_register_event_handler(pevent, -1, "timer", "hrtimer_start",
+ tep_register_event_handler(tep, -1, "timer", "hrtimer_start",
timer_start_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1,
+ tep_unregister_event_handler(tep, -1,
"timer", "hrtimer_expire_entry",
timer_expire_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
+ tep_unregister_event_handler(tep, -1, "timer", "hrtimer_start",
timer_start_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c
index a5e34135dd6a..04fc125f38cb 100644
--- a/tools/lib/traceevent/plugin_jbd2.c
+++ b/tools/lib/traceevent/plugin_jbd2.c
@@ -48,16 +48,16 @@ process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
return jiffies;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_jbd2_dev_to_name,
TEP_FUNC_ARG_STRING,
"jbd2_dev_to_name",
TEP_FUNC_ARG_INT,
TEP_FUNC_ARG_VOID);
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_jiffies_to_msecs,
TEP_FUNC_ARG_LONG,
"jiffies_to_msecs",
@@ -66,11 +66,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_jbd2_dev_to_name,
+ tep_unregister_print_function(tep, process_jbd2_dev_to_name,
"jbd2_dev_to_name");
- tep_unregister_print_function(pevent, process_jiffies_to_msecs,
+ tep_unregister_print_function(tep, process_jiffies_to_msecs,
"jiffies_to_msecs");
}
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
index 0e3c601f9ed1..edaec5d962c3 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -39,57 +39,57 @@ static int call_site_handler(struct trace_seq *s, struct tep_record *record,
if (tep_read_number_field(field, data, &val))
return 1;
- func = tep_find_function(event->pevent, val);
+ func = tep_find_function(event->tep, val);
if (!func)
return 1;
- addr = tep_find_function_address(event->pevent, val);
+ addr = tep_find_function_address(event->tep, val);
trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
return 1;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "kmem", "kfree",
+ tep_register_event_handler(tep, -1, "kmem", "kfree",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmalloc",
+ tep_register_event_handler(tep, -1, "kmem", "kmalloc",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ tep_register_event_handler(tep, -1, "kmem", "kmalloc_node",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem",
+ tep_register_event_handler(tep, -1, "kmem",
"kmem_cache_alloc_node",
call_site_handler, NULL);
- tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ tep_register_event_handler(tep, -1, "kmem", "kmem_cache_free",
call_site_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "kmem", "kfree",
+ tep_unregister_event_handler(tep, -1, "kmem", "kfree",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmalloc",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmalloc_node",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem",
+ tep_unregister_event_handler(tep, -1, "kmem",
"kmem_cache_alloc_node",
call_site_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+ tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_free",
call_site_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 64b9c25a1fd3..c8e623065a7e 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -389,8 +389,8 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
* We can only use the structure if file is of the same
* endianness.
*/
- if (tep_file_bigendian(event->pevent) ==
- tep_is_host_bigendian(event->pevent)) {
+ if (tep_is_file_bigendian(event->tep) ==
+ tep_is_local_bigendian(event->tep)) {
trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
role.level,
@@ -445,40 +445,40 @@ process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
return pte & PT_WRITABLE_MASK;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
init_disassembler();
- tep_register_event_handler(pevent, -1, "kvm", "kvm_exit",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
kvm_exit_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+ tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1,
+ tep_register_event_handler(tep, -1,
"kvmmmu", "kvm_mmu_unsync_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
kvm_mmu_print_role, NULL);
- tep_register_event_handler(pevent, -1, "kvmmmu",
+ tep_register_event_handler(tep, -1, "kvmmmu",
"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
NULL);
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_is_writable_pte,
TEP_FUNC_ARG_INT,
"is_writable_pte",
@@ -487,37 +487,37 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
kvm_exit_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+ tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1,
+ tep_unregister_event_handler(tep, -1,
"kvmmmu", "kvm_mmu_unsync_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+ tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
kvm_mmu_print_role, NULL);
- tep_unregister_event_handler(pevent, -1, "kvmmmu",
+ tep_unregister_event_handler(tep, -1, "kvmmmu",
"kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
NULL);
- tep_unregister_print_function(pevent, process_is_writable_pte,
+ tep_unregister_print_function(tep, process_is_writable_pte,
"is_writable_pte");
}
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
index e38b9477aad2..884303c26b5c 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -87,17 +87,17 @@ static int drv_bss_info_changed(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "mac80211",
+ tep_register_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "mac80211",
+ tep_unregister_event_handler(tep, -1, "mac80211",
"drv_bss_info_changed",
drv_bss_info_changed, NULL);
}
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index 834c9e378ff8..957389a0ff7a 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -62,7 +62,7 @@ static void write_and_save_comm(struct tep_format_field *field,
comm = &s->buffer[len];
/* Help out the comm to ids. This will handle dups */
- tep_register_comm(field->event->pevent, comm, pid);
+ tep_register_comm(field->event->tep, comm, pid);
}
static int sched_wakeup_handler(struct trace_seq *s,
@@ -135,27 +135,27 @@ static int sched_switch_handler(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_event_handler(pevent, -1, "sched", "sched_switch",
+ tep_register_event_handler(tep, -1, "sched", "sched_switch",
sched_switch_handler, NULL);
- tep_register_event_handler(pevent, -1, "sched", "sched_wakeup",
+ tep_register_event_handler(tep, -1, "sched", "sched_wakeup",
sched_wakeup_handler, NULL);
- tep_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ tep_register_event_handler(tep, -1, "sched", "sched_wakeup_new",
sched_wakeup_handler, NULL);
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_event_handler(pevent, -1, "sched", "sched_switch",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_switch",
sched_switch_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup",
sched_wakeup_handler, NULL);
- tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+ tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup_new",
sched_wakeup_handler, NULL);
}
diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c
index 4eba25cc1431..5d0387a4b65a 100644
--- a/tools/lib/traceevent/plugin_scsi.c
+++ b/tools/lib/traceevent/plugin_scsi.c
@@ -414,9 +414,9 @@ unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_scsi_trace_parse_cdb,
TEP_FUNC_ARG_STRING,
"scsi_trace_parse_cdb",
@@ -427,8 +427,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
+ tep_unregister_print_function(tep, process_scsi_trace_parse_cdb,
"scsi_trace_parse_cdb");
}
diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c
index bc0496e4c296..993b208d0323 100644
--- a/tools/lib/traceevent/plugin_xen.c
+++ b/tools/lib/traceevent/plugin_xen.c
@@ -120,9 +120,9 @@ unsigned long long process_xen_hypercall_name(struct trace_seq *s,
return 0;
}
-int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *tep)
{
- tep_register_print_function(pevent,
+ tep_register_print_function(tep,
process_xen_hypercall_name,
TEP_FUNC_ARG_STRING,
"xen_hypercall_name",
@@ -131,8 +131,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
return 0;
}
-void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
{
- tep_unregister_print_function(pevent, process_xen_hypercall_name,
+ tep_unregister_print_function(tep, process_xen_hypercall_name,
"xen_hypercall_name");
}
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
new file mode 100644
index 000000000000..b1d34c52f3c3
--- /dev/null
+++ b/tools/memory-model/.gitignore
@@ -0,0 +1 @@
+litmus
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index 35bff92cc773..68caa9a976d0 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -27,7 +27,7 @@ Explanation of the Linux-Kernel Memory Consistency Model
19. AND THEN THERE WAS ALPHA
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
- 22. RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
+ 22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
23. LOCKING
24. ODDS AND ENDS
@@ -1430,8 +1430,8 @@ they execute means that it cannot have cycles. This requirement is
the content of the LKMM's "propagation" axiom.
-RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb
-----------------------------------------------------
+RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+-------------------------------------------------------------
RCU (Read-Copy-Update) is a powerful synchronization mechanism. It
rests on two concepts: grace periods and read-side critical sections.
@@ -1446,17 +1446,19 @@ As far as memory models are concerned, RCU's main feature is its
Grace-Period Guarantee, which states that a critical section can never
span a full grace period. In more detail, the Guarantee says:
- If a critical section starts before a grace period then it
- must end before the grace period does. In addition, every
- store that propagates to the critical section's CPU before the
- end of the critical section must propagate to every CPU before
- the end of the grace period.
+ For any critical section C and any grace period G, at least
+ one of the following statements must hold:
- If a critical section ends after a grace period ends then it
- must start after the grace period does. In addition, every
- store that propagates to the grace period's CPU before the
- start of the grace period must propagate to every CPU before
- the start of the critical section.
+(1) C ends before G does, and in addition, every store that
+ propagates to C's CPU before the end of C must propagate to
+ every CPU before G ends.
+
+(2) G starts before C does, and in addition, every store that
+ propagates to G's CPU before the start of G must propagate
+ to every CPU before C starts.
+
+In particular, it is not possible for a critical section to both start
+before and end after a grace period.
Here is a simple example of RCU in action:
@@ -1483,10 +1485,11 @@ The Grace Period Guarantee tells us that when this code runs, it will
never end with r1 = 1 and r2 = 0. The reasoning is as follows. r1 = 1
means that P0's store to x propagated to P1 before P1 called
synchronize_rcu(), so P0's critical section must have started before
-P1's grace period. On the other hand, r2 = 0 means that P0's store to
-y, which occurs before the end of the critical section, did not
-propagate to P1 before the end of the grace period, violating the
-Guarantee.
+P1's grace period, contrary to part (2) of the Guarantee. On the
+other hand, r2 = 0 means that P0's store to y, which occurs before the
+end of the critical section, did not propagate to P1 before the end of
+the grace period, contrary to part (1). Together the results violate
+the Guarantee.
In the kernel's implementations of RCU, the requirements for stores
to propagate to every CPU are fulfilled by placing strong fences at
@@ -1504,11 +1507,11 @@ before" or "ends after" a grace period? Some aspects of the meaning
are pretty obvious, as in the example above, but the details aren't
entirely clear. The LKMM formalizes this notion by means of the
rcu-link relation. rcu-link encompasses a very general notion of
-"before": Among other things, X ->rcu-link Z includes cases where X
-happens-before or is equal to some event Y which is equal to or comes
-before Z in the coherence order. When Y = Z this says that X ->rfe Z
-implies X ->rcu-link Z. In addition, when Y = X it says that X ->fr Z
-and X ->co Z each imply X ->rcu-link Z.
+"before": If E and F are RCU fence events (i.e., rcu_read_lock(),
+rcu_read_unlock(), or synchronize_rcu()) then among other things,
+E ->rcu-link F includes cases where E is po-before some memory-access
+event X, F is po-after some memory-access event Y, and we have any of
+X ->rfe Y, X ->co Y, or X ->fr Y.
The formal definition of the rcu-link relation is more than a little
obscure, and we won't give it here. It is closely related to the pb
@@ -1516,171 +1519,173 @@ relation, and the details don't matter unless you want to comb through
a somewhat lengthy formal proof. Pretty much all you need to know
about rcu-link is the information in the preceding paragraph.
-The LKMM also defines the gp and rscs relations. They bring grace
-periods and read-side critical sections into the picture, in the
+The LKMM also defines the rcu-gp and rcu-rscsi relations. They bring
+grace periods and read-side critical sections into the picture, in the
following way:
- E ->gp F means there is a synchronize_rcu() fence event S such
- that E ->po S and either S ->po F or S = F. In simple terms,
- there is a grace period po-between E and F.
+ E ->rcu-gp F means that E and F are in fact the same event,
+ and that event is a synchronize_rcu() fence (i.e., a grace
+ period).
- E ->rscs F means there is a critical section delimited by an
- rcu_read_lock() fence L and an rcu_read_unlock() fence U, such
- that E ->po U and either L ->po F or L = F. You can think of
- this as saying that E and F are in the same critical section
- (in fact, it also allows E to be po-before the start of the
- critical section and F to be po-after the end).
+ E ->rcu-rscsi F means that E and F are the rcu_read_unlock()
+ and rcu_read_lock() fence events delimiting some read-side
+ critical section. (The 'i' at the end of the name emphasizes
+ that this relation is "inverted": It links the end of the
+ critical section to the start.)
If we think of the rcu-link relation as standing for an extended
-"before", then X ->gp Y ->rcu-link Z says that X executes before a
-grace period which ends before Z executes. (In fact it covers more
-than this, because it also includes cases where X executes before a
-grace period and some store propagates to Z's CPU before Z executes
-but doesn't propagate to some other CPU until after the grace period
-ends.) Similarly, X ->rscs Y ->rcu-link Z says that X is part of (or
-before the start of) a critical section which starts before Z
-executes.
-
-The LKMM goes on to define the rcu-fence relation as a sequence of gp
-and rscs links separated by rcu-link links, in which the number of gp
-links is >= the number of rscs links. For example:
+"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a
+grace period which ends before Z begins. (In fact it covers more than
+this, because it also includes cases where some store propagates to
+Z's CPU before Z begins but doesn't propagate to some other CPU until
+after X ends.) Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is
+the end of a critical section which starts before Z begins.
+
+The LKMM goes on to define the rcu-fence relation as a sequence of
+rcu-gp and rcu-rscsi links separated by rcu-link links, in which the
+number of rcu-gp links is >= the number of rcu-rscsi links. For
+example:
- X ->gp Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+ X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
would imply that X ->rcu-fence V, because this sequence contains two
-gp links and only one rscs link. (It also implies that X ->rcu-fence T
-and Z ->rcu-fence V.) On the other hand:
+rcu-gp links and one rcu-rscsi link. (It also implies that
+X ->rcu-fence T and Z ->rcu-fence V.) On the other hand:
- X ->rscs Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V
+ X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
does not imply X ->rcu-fence V, because the sequence contains only
-one gp link but two rscs links.
+one rcu-gp link but two rcu-rscsi links.
The rcu-fence relation is important because the Grace Period Guarantee
means that rcu-fence acts kind of like a strong fence. In particular,
-if W is a write and we have W ->rcu-fence Z, the Guarantee says that W
-will propagate to every CPU before Z executes.
+E ->rcu-fence F implies not only that E begins before F ends, but also
+that any write po-before E will propagate to every CPU before any
+instruction po-after F can execute. (However, it does not imply that
+E must execute before F; in fact, each synchronize_rcu() fence event
+is linked to itself by rcu-fence as a degenerate case.)
To prove this in full generality requires some intellectual effort.
We'll consider just a very simple case:
- W ->gp X ->rcu-link Y ->rscs Z.
+ G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F.
-This formula means that there is a grace period G and a critical
-section C such that:
+This formula means that G and W are the same event (a grace period),
+and there are events X, Y and a read-side critical section C such that:
- 1. W is po-before G;
+ 1. G = W is po-before or equal to X;
- 2. X is equal to or po-after G;
+ 2. X comes "before" Y in some sense (including rfe, co and fr);
- 3. X comes "before" Y in some sense;
+ 3. Y is po-before Z;
- 4. Y is po-before the end of C;
+ 4. Z is the rcu_read_unlock() event marking the end of C;
- 5. Z is equal to or po-after the start of C.
+ 5. F is the rcu_read_lock() event marking the start of C.
-From 2 - 4 we deduce that the grace period G ends before the critical
-section C. Then the second part of the Grace Period Guarantee says
-not only that G starts before C does, but also that W (which executes
-on G's CPU before G starts) must propagate to every CPU before C
-starts. In particular, W propagates to every CPU before Z executes
-(or finishes executing, in the case where Z is equal to the
-rcu_read_lock() fence event which starts C.) This sort of reasoning
-can be expanded to handle all the situations covered by rcu-fence.
+From 1 - 4 we deduce that the grace period G ends before the critical
+section C. Then part (2) of the Grace Period Guarantee says not only
+that G starts before C does, but also that any write which executes on
+G's CPU before G starts must propagate to every CPU before C starts.
+In particular, the write propagates to every CPU before F finishes
+executing and hence before any instruction po-after F can execute.
+This sort of reasoning can be extended to handle all the situations
+covered by rcu-fence.
Finally, the LKMM defines the RCU-before (rb) relation in terms of
rcu-fence. This is done in essentially the same way as the pb
relation was defined in terms of strong-fence. We will omit the
-details; the end result is that E ->rb F implies E must execute before
-F, just as E ->pb F does (and for much the same reasons).
+details; the end result is that E ->rb F implies E must execute
+before F, just as E ->pb F does (and for much the same reasons).
Putting this all together, the LKMM expresses the Grace Period
Guarantee by requiring that the rb relation does not contain a cycle.
-Equivalently, this "rcu" axiom requires that there are no events E and
-F with E ->rcu-link F ->rcu-fence E. Or to put it a third way, the
-axiom requires that there are no cycles consisting of gp and rscs
-alternating with rcu-link, where the number of gp links is >= the
-number of rscs links.
+Equivalently, this "rcu" axiom requires that there are no events E
+and F with E ->rcu-link F ->rcu-fence E. Or to put it a third way,
+the axiom requires that there are no cycles consisting of rcu-gp and
+rcu-rscsi alternating with rcu-link, where the number of rcu-gp links
+is >= the number of rcu-rscsi links.
Justifying the axiom isn't easy, but it is in fact a valid
formalization of the Grace Period Guarantee. We won't attempt to go
through the detailed argument, but the following analysis gives a
-taste of what is involved. Suppose we have a violation of the first
-part of the Guarantee: A critical section starts before a grace
-period, and some store propagates to the critical section's CPU before
-the end of the critical section but doesn't propagate to some other
-CPU until after the end of the grace period.
+taste of what is involved. Suppose both parts of the Guarantee are
+violated: A critical section starts before a grace period, and some
+store propagates to the critical section's CPU before the end of the
+critical section but doesn't propagate to some other CPU until after
+the end of the grace period.
Putting symbols to these ideas, let L and U be the rcu_read_lock() and
rcu_read_unlock() fence events delimiting the critical section in
question, and let S be the synchronize_rcu() fence event for the grace
period. Saying that the critical section starts before S means there
-are events E and F where E is po-after L (which marks the start of the
-critical section), E is "before" F in the sense of the rcu-link
-relation, and F is po-before the grace period S:
+are events Q and R where Q is po-after L (which marks the start of the
+critical section), Q is "before" R in the sense used by the rcu-link
+relation, and R is po-before the grace period S. Thus we have:
- L ->po E ->rcu-link F ->po S.
+ L ->rcu-link S.
-Let W be the store mentioned above, let Z come before the end of the
+Let W be the store mentioned above, let Y come before the end of the
critical section and witness that W propagates to the critical
-section's CPU by reading from W, and let Y on some arbitrary CPU be a
-witness that W has not propagated to that CPU, where Y happens after
+section's CPU by reading from W, and let Z on some arbitrary CPU be a
+witness that W has not propagated to that CPU, where Z happens after
some event X which is po-after S. Symbolically, this amounts to:
- S ->po X ->hb* Y ->fr W ->rf Z ->po U.
+ S ->po X ->hb* Z ->fr W ->rf Y ->po U.
-The fr link from Y to W indicates that W has not propagated to Y's CPU
-at the time that Y executes. From this, it can be shown (see the
-discussion of the rcu-link relation earlier) that X and Z are related
-by rcu-link, yielding:
+The fr link from Z to W indicates that W has not propagated to Z's CPU
+at the time that Z executes. From this, it can be shown (see the
+discussion of the rcu-link relation earlier) that S and U are related
+by rcu-link:
- S ->po X ->rcu-link Z ->po U.
+ S ->rcu-link U.
-The formulas say that S is po-between F and X, hence F ->gp X. They
-also say that Z comes before the end of the critical section and E
-comes after its start, hence Z ->rscs E. From all this we obtain:
+Since S is a grace period we have S ->rcu-gp S, and since L and U are
+the start and end of the critical section C we have U ->rcu-rscsi L.
+From this we obtain:
- F ->gp X ->rcu-link Z ->rscs E ->rcu-link F,
+ S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S,
a forbidden cycle. Thus the "rcu" axiom rules out this violation of
the Grace Period Guarantee.
For something a little more down-to-earth, let's see how the axiom
works out in practice. Consider the RCU code example from above, this
-time with statement labels added to the memory access instructions:
+time with statement labels added:
int x, y;
P0()
{
- rcu_read_lock();
- W: WRITE_ONCE(x, 1);
- X: WRITE_ONCE(y, 1);
- rcu_read_unlock();
+ L: rcu_read_lock();
+ X: WRITE_ONCE(x, 1);
+ Y: WRITE_ONCE(y, 1);
+ U: rcu_read_unlock();
}
P1()
{
int r1, r2;
- Y: r1 = READ_ONCE(x);
- synchronize_rcu();
- Z: r2 = READ_ONCE(y);
+ Z: r1 = READ_ONCE(x);
+ S: synchronize_rcu();
+ W: r2 = READ_ONCE(y);
}
-If r2 = 0 at the end then P0's store at X overwrites the value that
-P1's load at Z reads from, so we have Z ->fre X and thus Z ->rcu-link X.
-In addition, there is a synchronize_rcu() between Y and Z, so therefore
-we have Y ->gp Z.
+If r2 = 0 at the end then P0's store at Y overwrites the value that
+P1's load at W reads from, so we have W ->fre Y. Since S ->po W and
+also Y ->po U, we get S ->rcu-link U. In addition, S ->rcu-gp S
+because S is a grace period.
-If r1 = 1 at the end then P1's load at Y reads from P0's store at W,
-so we have W ->rcu-link Y. In addition, W and X are in the same critical
-section, so therefore we have X ->rscs W.
+If r1 = 1 at the end then P1's load at Z reads from P0's store at X,
+so we have X ->rfe Z. Together with L ->po X and Z ->po S, this
+yields L ->rcu-link S. And since L and U are the start and end of a
+critical section, we have U ->rcu-rscsi L.
-Then X ->rscs W ->rcu-link Y ->gp Z ->rcu-link X is a forbidden cycle,
-violating the "rcu" axiom. Hence the outcome is not allowed by the
-LKMM, as we would expect.
+Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a
+forbidden cycle, violating the "rcu" axiom. Hence the outcome is not
+allowed by the LKMM, as we would expect.
For contrast, let's see what can happen in a more complicated example:
@@ -1690,51 +1695,52 @@ For contrast, let's see what can happen in a more complicated example:
{
int r0;
- rcu_read_lock();
- W: r0 = READ_ONCE(x);
- X: WRITE_ONCE(y, 1);
- rcu_read_unlock();
+ L0: rcu_read_lock();
+ r0 = READ_ONCE(x);
+ WRITE_ONCE(y, 1);
+ U0: rcu_read_unlock();
}
P1()
{
int r1;
- Y: r1 = READ_ONCE(y);
- synchronize_rcu();
- Z: WRITE_ONCE(z, 1);
+ r1 = READ_ONCE(y);
+ S1: synchronize_rcu();
+ WRITE_ONCE(z, 1);
}
P2()
{
int r2;
- rcu_read_lock();
- U: r2 = READ_ONCE(z);
- V: WRITE_ONCE(x, 1);
- rcu_read_unlock();
+ L2: rcu_read_lock();
+ r2 = READ_ONCE(z);
+ WRITE_ONCE(x, 1);
+ U2: rcu_read_unlock();
}
If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows
-that W ->rscs X ->rcu-link Y ->gp Z ->rcu-link U ->rscs V ->rcu-link W.
-However this cycle is not forbidden, because the sequence of relations
-contains fewer instances of gp (one) than of rscs (two). Consequently
-the outcome is allowed by the LKMM. The following instruction timing
-diagram shows how it might actually occur:
+that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi
+L2 ->rcu-link U0. However this cycle is not forbidden, because the
+sequence of relations contains fewer instances of rcu-gp (one) than of
+rcu-rscsi (two). Consequently the outcome is allowed by the LKMM.
+The following instruction timing diagram shows how it might actually
+occur:
P0 P1 P2
-------------------- -------------------- --------------------
rcu_read_lock()
-X: WRITE_ONCE(y, 1)
- Y: r1 = READ_ONCE(y)
+WRITE_ONCE(y, 1)
+ r1 = READ_ONCE(y)
synchronize_rcu() starts
. rcu_read_lock()
- . V: WRITE_ONCE(x, 1)
-W: r0 = READ_ONCE(x) .
+ . WRITE_ONCE(x, 1)
+r0 = READ_ONCE(x) .
rcu_read_unlock() .
synchronize_rcu() ends
- Z: WRITE_ONCE(z, 1)
- U: r2 = READ_ONCE(z)
+ WRITE_ONCE(z, 1)
+ r2 = READ_ONCE(z)
rcu_read_unlock()
This requires P0 and P2 to execute their loads and stores out of
@@ -1744,6 +1750,15 @@ section in P0 both starts before P1's grace period does and ends
before it does, and the critical section in P2 both starts after P1's
grace period does and ends after it does.
+Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
+addition to normal RCU. The ideas involved are much the same as
+above, with new relations srcu-gp and srcu-rscsi added to represent
+SRCU grace periods and read-side critical sections. There is a
+restriction on the srcu-gp and srcu-rscsi links that can appear in an
+rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp
+links having the same SRCU domain with proper nesting); the details
+are relatively unimportant.
+
LOCKING
-------
diff --git a/tools/memory-model/README b/tools/memory-model/README
index acf9077cffaa..2b87f3971548 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -20,13 +20,17 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
-Version 7.49 of the "herd7" and "klitmus7" tools must be downloaded
-separately:
+Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+downloaded separately:
https://github.com/herd/herdtools7
See "herdtools7/INSTALL.md" for installation instructions.
+Note that although these tools usually provide backwards compatibility,
+this is not absolutely guaranteed. Therefore, if a later version does
+not work, please try using the exact version called out above.
+
==================
BASIC USAGE: HERD7
@@ -156,6 +160,8 @@ lock.cat
README
This file.
+scripts Various scripts, see scripts/README.
+
===========
LIMITATIONS
@@ -219,8 +225,29 @@ The Linux-kernel memory model has the following limitations:
additional call_rcu() process to the site of the
emulated rcu-barrier().
- e. Sleepable RCU (SRCU) is not modeled. It can be
- emulated, but perhaps not simply.
+ e. Although sleepable RCU (SRCU) is now modeled, there
+ are some subtle differences between its semantics and
+ those in the Linux kernel. For example, the kernel
+ might interpret the following sequence as two partially
+ overlapping SRCU read-side critical sections:
+
+ 1 r1 = srcu_read_lock(&my_srcu);
+ 2 do_something_1();
+ 3 r2 = srcu_read_lock(&my_srcu);
+ 4 do_something_2();
+ 5 srcu_read_unlock(&my_srcu, r1);
+ 6 do_something_3();
+ 7 srcu_read_unlock(&my_srcu, r2);
+
+ In contrast, LKMM will interpret this as a nested pair of
+ SRCU read-side critical sections, with the outer critical
+ section spanning lines 1-7 and the inner critical section
+ spanning lines 3-5.
+
+ This difference would be more of a concern had anyone
+ identified a reasonable use case for partially overlapping
+ SRCU read-side critical sections. For more information,
+ please see: https://paulmck.livejournal.com/40593.html
f. Reader-writer locking is not modeled. It can be
emulated in litmus tests using atomic read-modify-write
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index b84fb2f67109..def9131d3d8e 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -29,11 +29,18 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'sync-rcu (*synchronize_rcu*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
- 'after-spinlock (*smp_mb__after_spinlock*)
+ 'after-spinlock (*smp_mb__after_spinlock*) ||
+ 'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
+(* SRCU *)
+enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
+instructions SRCU[SRCU]
+(* All srcu events *)
+let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu
+
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
-let matched = let rec
+let rcu-rscs = let rec
unmatched-locks = Rcu-lock \ domain(matched)
and unmatched-unlocks = Rcu-unlock \ range(matched)
and unmatched = unmatched-locks | unmatched-unlocks
@@ -45,8 +52,27 @@ let matched = let rec
in matched
(* Validate nesting *)
-flag ~empty Rcu-lock \ domain(matched) as unbalanced-rcu-locking
-flag ~empty Rcu-unlock \ range(matched) as unbalanced-rcu-locking
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+
+(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
+let srcu-rscs = let rec
+ unmatched-locks = Srcu-lock \ domain(matched)
+ and unmatched-unlocks = Srcu-unlock \ range(matched)
+ and unmatched = unmatched-locks | unmatched-unlocks
+ and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
+ and unmatched-locks-to-unlocks =
+ ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
+ and matched = matched | (unmatched-locks-to-unlocks \
+ (unmatched-po ; unmatched-po))
+ in matched
+
+(* Validate nesting *)
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+
+(* Check for use of synchronize_srcu() inside an RCU critical section *)
+flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
-(* Outermost level of nesting only *)
-let crit = matched \ (po^-1 ; matched ; po^-1)
+(* Validate SRCU dynamic match *)
+flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 882fc33274ac..8dcb37835b61 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -30,8 +30,10 @@ let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
- ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M])
-let gp = po ; [Sync-rcu] ; po?
+ ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
+ ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
+ fencerel(After-unlock-lock) ; [M])
+let gp = po ; [Sync-rcu | Sync-srcu] ; po?
let strong-fence = mb | gp
@@ -89,32 +91,47 @@ acyclic pb as propagation
(*******)
(*
- * Effect of read-side critical section proceeds from the rcu_read_lock()
- * onward on the one hand and from the rcu_read_unlock() backwards on the
- * other hand.
+ * Effects of read-side critical sections proceed from the rcu_read_unlock()
+ * or srcu_read_unlock() backwards on the one hand, and from the
+ * rcu_read_lock() or srcu_read_lock() forwards on the other hand.
+ *
+ * In the definition of rcu-fence below, the po term at the left-hand side
+ * of each disjunct and the po? term at the right-hand end have been factored
+ * out. They have been moved into the definitions of rcu-link and rb.
+ * This was necessary in order to apply the "& loc" tests correctly.
*)
-let rscs = po ; crit^-1 ; po?
+let rcu-gp = [Sync-rcu] (* Compare with gp *)
+let srcu-gp = [Sync-srcu]
+let rcu-rscsi = rcu-rscs^-1
+let srcu-rscsi = srcu-rscs^-1
(*
* The synchronize_rcu() strong fence is special in that it can order not
* one but two non-rf relations, but only in conjunction with an RCU
* read-side critical section.
*)
-let rcu-link = hb* ; pb* ; prop
+let rcu-link = po? ; hb* ; pb* ; prop ; po
(*
* Any sequence containing at least as many grace periods as RCU read-side
* critical sections (joined by rcu-link) acts as a generalized strong fence.
+ * Likewise for SRCU grace periods and read-side critical sections, provided
+ * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same
+ * struct srcu_struct location.
*)
-let rec rcu-fence = gp |
- (gp ; rcu-link ; rscs) |
- (rscs ; rcu-link ; gp) |
- (gp ; rcu-link ; rcu-fence ; rcu-link ; rscs) |
- (rscs ; rcu-link ; rcu-fence ; rcu-link ; gp) |
+let rec rcu-fence = rcu-gp | srcu-gp |
+ (rcu-gp ; rcu-link ; rcu-rscsi) |
+ ((srcu-gp ; rcu-link ; srcu-rscsi) & loc) |
+ (rcu-rscsi ; rcu-link ; rcu-gp) |
+ ((srcu-rscsi ; rcu-link ; srcu-gp) & loc) |
+ (rcu-gp ; rcu-link ; rcu-fence ; rcu-link ; rcu-rscsi) |
+ ((srcu-gp ; rcu-link ; rcu-fence ; rcu-link ; srcu-rscsi) & loc) |
+ (rcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; rcu-gp) |
+ ((srcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; srcu-gp) & loc) |
(rcu-fence ; rcu-link ; rcu-fence)
(* rb orders instructions just as pb does *)
-let rb = prop ; rcu-fence ; hb* ; pb*
+let rb = prop ; po ; rcu-fence ; po? ; hb* ; pb*
irreflexive rb as rcu
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 6fa3eb28d40b..551eeaa389d4 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -23,6 +23,7 @@ smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
+smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
// Exchange
xchg(X,V) __xchg{mb}(X,V)
@@ -46,6 +47,12 @@ rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
+// SRCU
+srcu_read_lock(X) __srcu{srcu-lock}(X)
+srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+synchronize_srcu(X) { __srcu{sync-srcu}(X); }
+synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }
+
// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
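The new SRCU mappings in linux-kernel.def above model the kernel's
srcu_read_lock()/srcu_read_unlock()/synchronize_srcu() primitives. A
minimal, hypothetical kernel-C sketch of the calls being modeled is shown
below; my_srcu, my_data, reader() and updater() are illustrative names only.

	/* Hypothetical kernel-C sketch of the SRCU calls modeled above. */
	#include <linux/srcu.h>
	#include <linux/compiler.h>

	DEFINE_SRCU(my_srcu);
	static int my_data;

	static int reader(void)
	{
		int idx, val;

		idx = srcu_read_lock(&my_srcu);		/* 'srcu-lock' event */
		val = READ_ONCE(my_data);
		srcu_read_unlock(&my_srcu, idx);	/* 'srcu-unlock' event */
		return val;
	}

	static void updater(int new_val)
	{
		WRITE_ONCE(my_data, new_val);
		synchronize_srcu(&my_srcu);		/* 'sync-srcu' event */
	}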
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 305ded17e741..a059d1a6d8a2 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -6,9 +6,6 @@
(*
* Generate coherence orders and handle lock operations
- *
- * Warning: spin_is_locked() crashes herd7 versions strictly before 7.48.
- * spin_is_locked() is functional from herd7 version 7.49.
*)
include "cross.cat"
diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README
new file mode 100644
index 000000000000..29375a1fbbfa
--- /dev/null
+++ b/tools/memory-model/scripts/README
@@ -0,0 +1,70 @@
+ ============
+ LKMM SCRIPTS
+ ============
+
+
+These scripts are run from the tools/memory-model directory.
+
+checkalllitmus.sh
+
+ Run all litmus tests in the litmus-tests directory, checking
+ the results against the expected results recorded in the
+ "Result:" comment lines.
+
+checkghlitmus.sh
+
+ Run all litmus tests in the https://github.com/paulmckrcu/litmus
+ archive that are C-language and that have "Result:" comment lines
+ documenting expected results, comparing the actual results to
+ those expected.
+
+checklitmushist.sh
+
+ Run all litmus tests having .litmus.out files from previous
+ initlitmushist.sh or newlitmushist.sh runs, comparing the
+ herd output to that of the original runs.
+
+checklitmus.sh
+
+ Check a single litmus test against its "Result:" expected result.
+
+cmplitmushist.sh
+
+ Compare output from two different runs of the same litmus tests,
+ with the absolute pathnames of the tests to run provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+initlitmushist.sh
+
+ Run all litmus tests having no more than the specified number
+ of processes given a specified timeout, recording the results
+ in .litmus.out files.
+
+judgelitmus.sh
+
+ Given a .litmus file and its .litmus.out herd output, check the
+ .litmus.out file against the .litmus file's "Result:" comment to
+ judge whether the test ran correctly. Not normally run manually,
+ provided instead for use by other scripts.
+
+newlitmushist.sh
+
+ For all new or updated litmus tests having no more than the
+ specified number of processes given a specified timeout, run
+ and record the results in .litmus.out files.
+
+parseargs.sh
+
+ Parse command-line arguments. Not normally run manually,
+ provided instead for use by other scripts.
+
+runlitmushist.sh
+
+ Run the litmus tests whose absolute pathnames are provided one
+ name per line on standard input. Not normally run manually,
+ provided instead for use by other scripts.
+
+README
+
+ This file
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index ca528f9a24d4..b35fcd61ecf6 100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -1,42 +1,27 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run herd tests on all .litmus files in the specified directory (which
-# defaults to litmus-tests) and check each file's result against a "Result:"
-# comment within that litmus test. If the verification result does not
-# match that specified in the litmus test, this script prints an error
-# message prefixed with "^^^". It also outputs verification results to
-# a file whose name is that of the specified litmus test, but with ".out"
-# appended.
+# Run herd tests on all .litmus files in the litmus-tests directory
+# and check each file's result against a "Result:" comment within that
+# litmus test. If the verification result does not match that specified
+# in the litmus test, this script prints an error message prefixed with
+# "^^^". It also outputs verification results to a file whose name is
+# that of the specified litmus test, but with ".out" appended.
#
# Usage:
-# checkalllitmus.sh [ directory ]
+# checkalllitmus.sh
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, whose default is defined by the checklitmus.sh script.
-# Thus, one would normally run this in the directory containing the memory
-# model, specifying the pathname of the litmus test to check.
+# Run this in the directory containing the memory model.
#
# This script makes no attempt to run the litmus tests concurrently.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-litmusdir=${1-litmus-tests}
+. scripts/parseargs.sh
+
+litmusdir=litmus-tests
if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir"
then
:
@@ -45,6 +30,14 @@ else
exit 255
fi
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find $litmusdir -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
# Find the checklitmus script. If it is not where we expect it, then
# assume that the caller has the PATH environment variable set
# appropriately.
@@ -57,7 +50,7 @@ fi
# Run the script on all the litmus tests in the specified directory
ret=0
-for i in litmus-tests/*.litmus
+for i in $litmusdir/*.litmus
do
if ! $clscript $i
then
@@ -66,8 +59,8 @@ do
done
if test "$ret" -ne 0
then
- echo " ^^^ VERIFICATION MISMATCHES"
+ echo " ^^^ VERIFICATION MISMATCHES" 1>&2
else
- echo All litmus tests verified as was expected.
+ echo All litmus tests verified as was expected. 1>&2
fi
exit $ret
diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh
new file mode 100644
index 000000000000..6589fbb6f653
--- /dev/null
+++ b/tools/memory-model/scripts/checkghlitmus.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests having a maximum number of processes
+# to run, defaults to 6.
+#
+# sh checkghlitmus.sh
+#
+# Run from the Linux kernel tools/memory-model directory. See the
+# parseargs.sh scripts for arguments.
+
+. scripts/parseargs.sh
+
+T=/tmp/checkghlitmus.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# Clone the repository if it is not already present.
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Create a list of C-language litmus tests with "Result:" commands and
+# no more than the specified number of processes.
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
+xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short
+
+# Form list of tests without corresponding .litmus.out files
+sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed
+
+# Run any needed tests.
+if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr
+then
+ errs=
+else
+ errs=1
+fi
+
+sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' |
+ sh > $T/judge.stdout 2> $T/judge.stderr
+
+if test -n "$errs"
+then
+ cat $T/run.stderr 1>&2
+fi
+grep '!!!' $T/judge.stdout
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index bf12a75c0719..dd08801a30b0 100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -1,40 +1,24 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
-# Run a herd test and check the result against a "Result:" comment within
-# the litmus test. If the verification result does not match that specified
-# in the litmus test, this script prints an error message prefixed with
-# "^^^" and exits with a non-zero status. It also outputs verification
+# Run a herd test and invokes judgelitmus.sh to check the result against
+# a "Result:" comment within the litmus test. It also outputs verification
# results to a file whose name is that of the specified litmus test, but
# with ".out" appended.
#
# Usage:
# checklitmus.sh file.litmus
#
-# The LINUX_HERD_OPTIONS environment variable may be used to specify
-# arguments to herd, which default to "-conf linux-kernel.cfg". Thus,
-# one would normally run this in the directory containing the memory model,
-# specifying the pathname of the litmus test to check.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check. The caller is expected to have
+# properly set up the LKMM environment variables.
#
# Copyright IBM Corporation, 2018
#
# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
litmus=$1
-herdoptions=${LINUX_HERD_OPTIONS--conf linux-kernel.cfg}
+herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
if test -f "$litmus" -a -r "$litmus"
then
@@ -43,44 +27,8 @@ else
echo ' --- ' error: \"$litmus\" is not a readable file
exit 255
fi
-if grep -q '^ \* Result: ' $litmus
-then
- outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
-else
- outcome=specified
-fi
-echo Herd options: $herdoptions > $litmus.out
-/usr/bin/time herd7 -o ~/tmp $herdoptions $litmus >> $litmus.out 2>&1
-grep "Herd options:" $litmus.out
-grep '^Observation' $litmus.out
-if grep -q '^Observation' $litmus.out
-then
- :
-else
- cat $litmus.out
- echo ' ^^^ Verification error'
- echo ' ^^^ Verification error' >> $litmus.out 2>&1
- exit 255
-fi
-if test "$outcome" = DEADLOCK
-then
- echo grep 3 and 4
- if grep '^Observation' $litmus.out | grep -q 'Never 0 0$'
- then
- ret=0
- else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
- fi
-elif grep '^Observation' $litmus.out | grep -q $outcome || test "$outcome" = Maybe
-then
- ret=0
-else
- echo " ^^^ Unexpected non-$outcome verification"
- echo " ^^^ Unexpected non-$outcome verification" >> $litmus.out 2>&1
- ret=1
-fi
-tail -2 $litmus.out | head -1
-exit $ret
+echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
+/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
+
+scripts/judgelitmus.sh $litmus
diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh
new file mode 100644
index 000000000000..1d210ffb7c8a
--- /dev/null
+++ b/tools/memory-model/scripts/checklitmushist.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Reruns the C-language litmus tests previously run that match the
+# specified criteria, and compares the result to that of the previous
+# runs from initlitmushist.sh and/or newlitmushist.sh.
+#
+# sh checklitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/checklitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Run scripts/initlitmushist.sh first, need litmus repo.
+ exit 1
+fi
+
+# Create the results directory and populate it with subdirectories.
+# The initial output is created here to avoid clobbering the output
+# generated earlier.
+mkdir $T/results
+find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh )
+
+# Create the list of litmus tests already run, then remove those that
+# are excluded by this run's --procs argument.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+ xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Redirect output, run tests, then restore destination directory.
+destdir="$LKMM_DESTDIR"
+LKMM_DESTDIR=$T/results; export LKMM_DESTDIR
+scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1
+LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR
+
+# Move the newly generated .litmus.out files to .litmus.out.new files
+# in the destination directory.
+cdir=`pwd`
+ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \
+ 'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null`
+( cd $T/results; find litmus -type f -name '*.litmus.out' -print |
+ sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh )
+
+sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' |
+ sh scripts/cmplitmushist.sh
+exit $?
diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh
new file mode 100644
index 000000000000..0f498aeeccf5
--- /dev/null
+++ b/tools/memory-model/scripts/cmplitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Compares .out and .out.new files for each name on standard input,
+# one full pathname per line. Outputs comparison results followed by
+# a summary.
+#
+# sh cmplitmushist.sh
+
+T=/tmp/cmplitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# comparetest oldpath newpath
+perfect=0
+obsline=0
+noobsline=0
+obsresult=0
+badcompare=0
+comparetest () {
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
+ grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
+ if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
+ then
+ echo Exact output match: $2
+ perfect=`expr "$perfect" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 > $T/oldout
+ grep '^Observation' $2 > $T/newout
+ if test -s $T/oldout -o -s $T/newout
+ then
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation result and counts: $2
+ obsline=`expr "$obsline" + 1`
+ return 0
+ fi
+ else
+ echo Missing Observation line "(e.g., herd7 timeout)": $2
+ noobsline=`expr "$noobsline" + 1`
+ return 0
+ fi
+
+ grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout
+ grep '^Observation' $2 | awk '{ print $3 }' > $T/newout
+ if cmp -s $T/oldout $T/newout
+ then
+ echo Matching Observation Always/Sometimes/Never result: $2
+ obsresult=`expr "$obsresult" + 1`
+ return 0
+ fi
+ echo ' !!!' Result changed: $2
+ badcompare=`expr "$badcompare" + 1`
+ return 1
+}
+
+sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript
+. $T/cmpscript > $T/cmpscript.out
+cat $T/cmpscript.out
+
+echo ' ---' Summary: 1>&2
+grep '!!!' $T/cmpscript.out 1>&2
+if test "$perfect" -ne 0
+then
+ echo Exact output matches: $perfect 1>&2
+fi
+if test "$obsline" -ne 0
+then
+ echo Matching Observation result and counts: $obsline 1>&2
+fi
+if test "$noobsline" -ne 0
+then
+ echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
+fi
+if test "$obsresult" -ne 0
+then
+ echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
+fi
+if test "$badcompare" -ne 0
+then
+ echo "!!!" Result changed: $badcompare 1>&2
+ exit 1
+fi
+
+exit 0
diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh
new file mode 100644
index 000000000000..956b6957484d
--- /dev/null
+++ b/tools/memory-model/scripts/initlitmushist.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria.
+# Generates the output for each .litmus file into a corresponding
+# .litmus.out file, and does not judge the result.
+#
+# sh initlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# This script can consume significant wallclock time and CPU, especially as
+# the value of --procs rises. On a four-core (eight hardware threads)
+# 2.5GHz x86 with a one-minute per-run timeout:
+#
+# --procs wallclock CPU timeouts tests
+# 1 0m11.241s 0m1.086s 0 19
+# 2 1m12.598s 2m8.459s 2 393
+# 3 1m30.007s 6m2.479s 4 2291
+# 4 3m26.042s 18m5.139s 9 3217
+# 5 4m26.661s 23m54.128s 13 3784
+# 6 4m41.900s 26m4.721s 13 4352
+# 7 5m51.463s 35m50.868s 13 4626
+# 8 10m5.235s 68m43.672s 34 5117
+# 9 15m57.80s 105m58.101s 69 5156
+# 10 16m14.13s 103m35.009s 69 5165
+# 20 27m48.55s 198m3.286s 156 5269
+#
+# Increasing the timeout on the 20-process run to five minutes increases
+# the runtime to about 90 minutes with the CPU time rising to about
+# 10 hours. On the other hand, it decreases the number of timeouts to 101.
+#
+# Note that there are historical tests for which herd7 will fail
+# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus
+# contains a call to spin_unlock_wait(), which no longer exists in either
+# the kernel or LKMM.
+
+. scripts/parseargs.sh
+
+T=/tmp/initlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ git clone https://github.com/paulmckrcu/litmus
+ ( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests with no more than the
+# specified number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+scripts/runlitmushist.sh < $T/list-C-short
+
+exit 0
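
A hypothetical first run, limited to tests with at most four processes and
giving herd7 two minutes per test:

	$ cd tools/memory-model
	$ sh scripts/initlitmushist.sh --procs 4 --timeout 2m
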
diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh
new file mode 100644
index 000000000000..0cc63875e395
--- /dev/null
+++ b/tools/memory-model/scripts/judgelitmus.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Given a .litmus test and the corresponding .litmus.out file, check
+# the .litmus.out file against the "Result:" comment to judge whether
+# the test ran correctly.
+#
+# Usage:
+# judgelitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+litmus=$1
+
+if test -f "$litmus" -a -r "$litmus"
+then
+ :
+else
+ echo ' --- ' error: \"$litmus\" is not a readable file
+ exit 255
+fi
+if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out
+then
+ :
+else
+ echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
+ exit 255
+fi
+if grep -q '^ \* Result: ' $litmus
+then
+ outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
+else
+ outcome=specified
+fi
+
+grep '^Observation' $LKMM_DESTDIR/$litmus.out
+if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
+then
+ :
+else
+ echo ' !!! Verification error' $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ exit 255
+fi
+if test "$outcome" = DEADLOCK
+then
+ if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
+ then
+ ret=0
+ else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+ fi
+elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
+then
+ ret=0
+else
+ echo " !!! Unexpected non-$outcome verification" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ then
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ fi
+ ret=1
+fi
+tail -2 $LKMM_DESTDIR/$litmus.out | head -1
+exit $ret
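
The "Result:" comment that judgelitmus.sh keys off of normally appears in the
litmus test's header; a hypothetical example:

	C C-foo
	(*
	 * Result: Never
	 *)

Given that header, and with LKMM_DESTDIR already exported by parseargs.sh, a
hand run might look like the following, where the Observation line is echoed
from the corresponding .litmus.out file (the counts here are made up):

	$ sh scripts/judgelitmus.sh litmus/auto/C-foo.litmus
	Observation C-foo Never 0 42
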
diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh
new file mode 100644
index 000000000000..991f8f814881
--- /dev/null
+++ b/tools/memory-model/scripts/newlitmushist.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria
+# that do not already have a corresponding .litmus.out file, and does
+# not judge the result.
+#
+# sh newlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/newlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Run scripts/initlitmushist.sh first, need litmus repo.
+ exit 1
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+ find litmus -type d -print |
+ ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+ sed -e 's/\.out$//' |
+	xargs -r grep -L "^P${LKMM_PROCS}" > $T/list-C-already
+
+# Form full list of litmus tests with no more than the specified
+# number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
+xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Form list of new tests. Note: This does not handle litmus-test deletion!
+sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new
+
+# Form list of litmus tests that have changed since the last run.
+sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script
+sh $T/list-C-script > $T/list-C-newer
+
+# Merge the list of new and of updated litmus tests: These must be (re)run.
+sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed
+
+scripts/runlitmushist.sh < $T/list-C-needed
+
+exit 0
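
The new-test computation above leans on "sort | uniq -u", which keeps only
lines appearing in exactly one of the two lists; a small sketch with made-up
pathnames:

	$ printf 'litmus/a.litmus\nlitmus/b.litmus\n' > already
	$ printf 'litmus/a.litmus\nlitmus/b.litmus\nlitmus/c.litmus\n' > short
	$ sort already short | uniq -u
	litmus/c.litmus

This is also why deletions are not handled: a test present only in the
"already" list would be selected just the same.
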
diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh
new file mode 100644
index 000000000000..859e1d581e05
--- /dev/null
+++ b/tools/memory-model/scripts/parseargs.sh
@@ -0,0 +1,136 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Parse arguments common to the various litmus-test scripts.
+#
+# . scripts/parseargs.sh
+#
+# Include into other Linux kernel tools/memory-model scripts.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parseargs.sh.$$
+mkdir $T
+
+# Initialize one parameter: initparam name default
+initparam () {
+ echo if test -z '"$'$1'"' > $T/s
+ echo then >> $T/s
+ echo $1='"'$2'"' >> $T/s
+ echo export $1 >> $T/s
+ echo fi >> $T/s
+ echo $1_DEF='$'$1 >> $T/s
+ . $T/s
+}
+
+initparam LKMM_DESTDIR "."
+initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
+initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
+initparam LKMM_PROCS "3"
+initparam LKMM_TIMEOUT "1m"
+
+scriptname=$0
+
+usagehelp () {
+ echo "Usage $scriptname [ arguments ]"
+	echo " --destdir path (place for .litmus.out files, default: next to the .litmus file)"
+ echo " --herdopts -conf linux-kernel.cfg ..."
+ echo " --jobs N (number of jobs, default one per CPU)"
+ echo " --procs N (litmus tests with at most this many processes)"
+	echo " --timeout N (herd7 timeout, e.g., 10s, 1m, 2hr, 1d, or '' for none)"
+ echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
+ exit 1
+}
+
+usage () {
+ usagehelp 1>&2
+}
+
+# checkarg --argname argtype $# arg mustmatch cannotmatch
+checkarg () {
+ if test $3 -le 1
+ then
+ echo $1 needs argument $2 matching \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$5"
+ then
+ :
+ else
+ echo $1 $2 \"$4\" must match \"$5\"
+ usage
+ fi
+ if echo "$4" | grep -q -e "$6"
+ then
+ echo $1 $2 \"$4\" must not match \"$6\"
+ usage
+ fi
+}
+
+while test $# -gt 0
+do
+ case "$1" in
+ --destdir)
+ checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--'
+ LKMM_DESTDIR="$2"
+ mkdir $LKMM_DESTDIR > /dev/null 2>&1
+ if ! test -e "$LKMM_DESTDIR"
+ then
+ echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
+ usage
+ fi
+ if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
+ then
+ :
+ else
+			echo "Directory --destdir '$LKMM_DESTDIR' lacks permissions needed to create files"
+ usage
+ fi
+ shift
+ ;;
+ --herdopts|--herdopt)
+		checkarg --herdopts "(herd options)" "$#" "$2" '.*' '^--'
+ LKMM_HERD_OPTIONS="$2"
+ shift
+ ;;
+ -j[1-9]*)
+ njobs="`echo $1 | sed -e 's/^-j//'`"
+ trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
+ if test -n "$trailchars"
+ then
+ echo $1 trailing characters "'$trailchars'"
+ usagehelp
+ fi
+ LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
+ ;;
+ --jobs|--job|-j)
+		checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
+ LKMM_JOBS="$2"
+ shift
+ ;;
+ --procs|--proc)
+ checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--'
+ LKMM_PROCS="$2"
+ shift
+ ;;
+ --timeout)
+ checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--'
+ LKMM_TIMEOUT="$2"
+ shift
+ ;;
+ *)
+ echo Unknown argument $1
+ usage
+ ;;
+ esac
+ shift
+done
+if test -z "$LKMM_TIMEOUT"
+then
+ LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD
+else
+ LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD
+fi
+rm -rf $T
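
Because initparam assigns a default only when the corresponding variable is
empty, the same knobs can be supplied through the environment instead of on
the command line; a hypothetical sketch:

	$ LKMM_PROCS=5 LKMM_TIMEOUT=5m sh scripts/newlitmushist.sh
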
diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh
new file mode 100644
index 000000000000..e507f5f933d5
--- /dev/null
+++ b/tools/memory-model/scripts/runlitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests specified on standard input, using up
+# to the specified number of CPUs (defaulting to all of them) and placing
+# the results in the specified directory (defaulting to the same place
+# the litmus test came from).
+#
+# bash runlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# This script uses environment variables produced by parseargs.sh.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/runlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+ :
+else
+ echo Directory \"litmus\" missing, aborting run.
+ exit 1
+fi
+
+# Prefixes for per-CPU scripts
+for ((i=0;i<$LKMM_JOBS;i++))
+do
+ echo dir="$LKMM_DESTDIR" > $T/$i.sh
+ echo T=$T >> $T/$i.sh
+ echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
+ cat << '___EOF___' >> $T/$i.sh
+ runtest () {
+ echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
+ if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
+ then
+ if ! grep -q '^Observation ' $dir/$1.out
+ then
+ echo ' !!! Herd failed, no Observation:' $1
+ fi
+ else
+ exitcode=$?
+ if test "$exitcode" -eq 124
+ then
+ exitmsg="timed out"
+ else
+ exitmsg="failed, exit code $exitcode"
+ fi
+ echo ' !!! Herd' ${exitmsg}: $1
+ fi
+ }
+___EOF___
+done
+
+awk -v q="'" -v b='\\' '
+{
+ print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
+}' | bash |
+sort -k1n |
+awk -v ncpu=$LKMM_JOBS -v t=$T '
+{
+ print "runtest " $2 >> t "/" NR % ncpu ".sh";
+}
+
+END {
+ for (i = 0; i < ncpu; i++) {
+ print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &";
+ close(t "/" i ".sh");
+ }
+ print "wait";
+}' | sh
+cat $T/*.sh.out
+if grep -q '!!!' $T/*.sh.out
+then
+ echo ' ---' Summary: 1>&2
+ grep '!!!' $T/*.sh.out 1>&2
+ nfail="`grep '!!!' $T/*.sh.out | wc -l`"
+ echo 'Number of failed herd runs (e.g., timeout): ' $nfail 1>&2
+ exit 1
+else
+ echo All runs completed successfully. 1>&2
+ exit 0
+fi
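
The awk pipeline above prefixes each pathname with the index of its last
process (taken from the final "Pn(" header in the file) so that "sort -k1n"
can order the tests by size before they are dealt out round-robin to the
per-CPU scripts; its intermediate output might look like this for two
hypothetical tests:

	1 litmus/auto/C-2proc.litmus
	6 litmus/auto/C-7proc.litmus
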
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 3995735a878f..4dd11a554b9b 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -111,7 +111,7 @@ c) Higher live patching compatibility rate
be detectable). Objtool makes that possible.
For more details, see the livepatch documentation in the Linux kernel
- source tree at Documentation/livepatch/livepatch.txt.
+ source tree at Documentation/livepatch/livepatch.rst.
Rules
-----
@@ -306,7 +306,7 @@ ignore it:
- To skip validation of a file, add
- OBJECT_FILES_NON_STANDARD_filename.o := n
+ OBJECT_FILES_NON_STANDARD_filename.o := y
to the Makefile.
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index c9d038f91af6..88158239622b 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,11 +7,12 @@ ARCH := x86
endif
# always use the host compiler
+HOSTAR ?= ar
HOSTCC ?= gcc
HOSTLD ?= ld
+AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
-AR = ar
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -25,14 +26,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
OBJTOOL := $(OUTPUT)objtool
OBJTOOL_IN := $(OBJTOOL)-in.o
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
all: $(OBJTOOL)
INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
-I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index b0d7dc3d71b5..580e344db3dd 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ARCH_H
@@ -33,7 +21,11 @@
#define INSN_STACK 8
#define INSN_BUG 9
#define INSN_NOP 10
-#define INSN_OTHER 11
+#define INSN_STAC 11
+#define INSN_CLAC 12
+#define INSN_STD 13
+#define INSN_CLD 14
+#define INSN_OTHER 15
#define INSN_LAST INSN_OTHER
enum op_dest_type {
@@ -41,6 +33,7 @@ enum op_dest_type {
OP_DEST_REG_INDIRECT,
OP_DEST_MEM,
OP_DEST_PUSH,
+ OP_DEST_PUSHF,
OP_DEST_LEAVE,
};
@@ -55,6 +48,7 @@ enum op_src_type {
OP_SRC_REG_INDIRECT,
OP_SRC_CONST,
OP_SRC_POP,
+ OP_SRC_POPF,
OP_SRC_ADD,
OP_SRC_AND,
};
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 540a209b78ab..584568f27a83 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
@@ -357,19 +345,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
/* pushf */
*type = INSN_STACK;
op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ op->dest.type = OP_DEST_PUSHF;
break;
case 0x9d:
/* popf */
*type = INSN_STACK;
- op->src.type = OP_SRC_POP;
+ op->src.type = OP_SRC_POPF;
op->dest.type = OP_DEST_MEM;
break;
case 0x0f:
- if (op2 >= 0x80 && op2 <= 0x8f) {
+ if (op2 == 0x01) {
+
+ if (modrm == 0xca)
+ *type = INSN_CLAC;
+ else if (modrm == 0xcb)
+ *type = INSN_STAC;
+
+ } else if (op2 >= 0x80 && op2 <= 0x8f) {
*type = INSN_JUMP_CONDITIONAL;
@@ -444,6 +439,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
*type = INSN_CALL;
break;
+ case 0xfc:
+ *type = INSN_CLD;
+ break;
+
+ case 0xfd:
+ *type = INSN_STD;
+ break;
+
case 0xff:
if (modrm_reg == 2 || modrm_reg == 3)
diff --git a/tools/objtool/arch/x86/include/asm/inat.h b/tools/objtool/arch/x86/include/asm/inat.h
index 1c78580e58be..4cf2ad521f65 100644
--- a/tools/objtool/arch/x86/include/asm/inat.h
+++ b/tools/objtool/arch/x86/include/asm/inat.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INAT_H
#define _ASM_X86_INAT_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <asm/inat_types.h>
diff --git a/tools/objtool/arch/x86/include/asm/inat_types.h b/tools/objtool/arch/x86/include/asm/inat_types.h
index cb3c20ce39cf..b047efa9ddc2 100644
--- a/tools/objtool/arch/x86/include/asm/inat_types.h
+++ b/tools/objtool/arch/x86/include/asm/inat_types.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INAT_TYPES_H
#define _ASM_X86_INAT_TYPES_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
/* Instruction attributes */
diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
index c2c01f84df75..154f27be8bfc 100644
--- a/tools/objtool/arch/x86/include/asm/insn.h
+++ b/tools/objtool/arch/x86/include/asm/insn.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INSN_H
#define _ASM_X86_INSN_H
/*
* x86 instruction analysis
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2009
*/
diff --git a/tools/objtool/arch/x86/include/asm/orc_types.h b/tools/objtool/arch/x86/include/asm/orc_types.h
index 46f516dd80ce..6e060907c163 100644
--- a/tools/objtool/arch/x86/include/asm/orc_types.h
+++ b/tools/objtool/arch/x86/include/asm/orc_types.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ORC_TYPES_H
diff --git a/tools/objtool/arch/x86/lib/inat.c b/tools/objtool/arch/x86/lib/inat.c
index c1f01a8e9f65..12539fca75c4 100644
--- a/tools/objtool/arch/x86/lib/inat.c
+++ b/tools/objtool/arch/x86/lib/inat.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 instruction attribute tables
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <asm/insn.h>
diff --git a/tools/objtool/arch/x86/lib/insn.c b/tools/objtool/arch/x86/lib/insn.c
index 1088eb8f3a5f..0b5862ba6a75 100644
--- a/tools/objtool/arch/x86/lib/insn.c
+++ b/tools/objtool/arch/x86/lib/insn.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 instruction analysis
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2002, 2004, 2009
*/
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 694abc628e9b..c807984a03c1 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -29,7 +17,7 @@
#include "builtin.h"
#include "check.h"
-bool no_fp, no_unreachable, retpoline, module;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
static const char * const check_usage[] = {
"objtool check [<options>] file.o",
@@ -41,6 +29,8 @@ const struct option check_options[] = {
OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+ OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
+ OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
OPT_END(),
};
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 77ea2b97117d..5f7cc6157edd 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 28ff40e19a14..a32736f8d2a4 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _BUILTIN_H
#define _BUILTIN_H
@@ -20,7 +8,7 @@
#include <subcmd/parse-options.h>
extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
extern int cmd_check(int argc, const char **argv);
extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/cfi.h b/tools/objtool/cfi.h
index 2fe883c665c7..4427bf8ed686 100644
--- a/tools/objtool/cfi.h
+++ b/tools/objtool/cfi.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _OBJTOOL_CFI_H
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0414a0d52262..172f99195726 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
@@ -28,9 +16,12 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
+#define FAKE_JUMP_OFFSET -1
+
struct alternative {
struct list_head list;
struct instruction *insn;
+ bool skip_orig;
};
const char *objname;
@@ -105,29 +96,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
insn = next_insn_same_sec(file, insn))
/*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
- struct rela *rela;
-
- /* check for STACK_FRAME_NON_STANDARD */
- if (file->whitelist && file->whitelist->rela)
- list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
- if (rela->sym->type == STT_SECTION &&
- rela->sym->sec == func->sec &&
- rela->addend == func->offset)
- return true;
- if (rela->sym->type == STT_FUNC && rela->sym == func)
- return true;
- }
-
- return false;
-}
-
-/*
* This checks to see if the given function is a "noreturn" function.
*
* For global functions which are outside the scope of this object file, we
@@ -165,6 +133,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
"fortify_panic",
"usercopy_abort",
"machine_real_restart",
+ "rewind_stack_do_exit",
};
if (func->bind == STB_WEAK)
@@ -436,18 +405,107 @@ static void add_ignores(struct objtool_file *file)
struct instruction *insn;
struct section *sec;
struct symbol *func;
+ struct rela *rela;
- for_each_sec(file, sec) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
+ sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
+ if (!sec)
+ return;
- if (!ignore_func(file, func))
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ switch (rela->sym->type) {
+ case STT_FUNC:
+ func = rela->sym;
+ break;
+
+ case STT_SECTION:
+ func = find_symbol_by_offset(rela->sym->sec, rela->addend);
+ if (!func || func->type != STT_FUNC)
continue;
+ break;
- func_for_each_insn_all(file, func, insn)
- insn->ignore = true;
+ default:
+ WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
+ continue;
}
+
+ func_for_each_insn_all(file, func, insn)
+ insn->ignore = true;
+ }
+}
+
+/*
+ * This is a whitelist of functions that are allowed to be called with AC set.
+ * The list is meant to be minimal and only contains compiler instrumentation
+ * ABI and a few functions used to implement *_{to,from}_user() functions.
+ *
+ * These functions must not directly change AC, but may PUSHF/POPF.
+ */
+static const char *uaccess_safe_builtin[] = {
+ /* KASAN */
+ "kasan_report",
+ "check_memory_region",
+ /* KASAN out-of-line */
+ "__asan_loadN_noabort",
+ "__asan_load1_noabort",
+ "__asan_load2_noabort",
+ "__asan_load4_noabort",
+ "__asan_load8_noabort",
+ "__asan_load16_noabort",
+ "__asan_storeN_noabort",
+ "__asan_store1_noabort",
+ "__asan_store2_noabort",
+ "__asan_store4_noabort",
+ "__asan_store8_noabort",
+ "__asan_store16_noabort",
+ /* KASAN in-line */
+ "__asan_report_load_n_noabort",
+ "__asan_report_load1_noabort",
+ "__asan_report_load2_noabort",
+ "__asan_report_load4_noabort",
+ "__asan_report_load8_noabort",
+ "__asan_report_load16_noabort",
+ "__asan_report_store_n_noabort",
+ "__asan_report_store1_noabort",
+ "__asan_report_store2_noabort",
+ "__asan_report_store4_noabort",
+ "__asan_report_store8_noabort",
+ "__asan_report_store16_noabort",
+ /* KCOV */
+ "write_comp_data",
+ "__sanitizer_cov_trace_pc",
+ "__sanitizer_cov_trace_const_cmp1",
+ "__sanitizer_cov_trace_const_cmp2",
+ "__sanitizer_cov_trace_const_cmp4",
+ "__sanitizer_cov_trace_const_cmp8",
+ "__sanitizer_cov_trace_cmp1",
+ "__sanitizer_cov_trace_cmp2",
+ "__sanitizer_cov_trace_cmp4",
+ "__sanitizer_cov_trace_cmp8",
+ /* UBSAN */
+ "ubsan_type_mismatch_common",
+ "__ubsan_handle_type_mismatch",
+ "__ubsan_handle_type_mismatch_v1",
+ /* misc */
+ "csum_partial_copy_generic",
+ "__memcpy_mcsafe",
+ "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+ NULL
+};
+
+static void add_uaccess_safe(struct objtool_file *file)
+{
+ struct symbol *func;
+ const char **name;
+
+ if (!uaccess)
+ return;
+
+ for (name = uaccess_safe_builtin; *name; name++) {
+ func = find_symbol_by_name(file->elf, *name);
+ if (!func)
+ continue;
+
+ func->alias->uaccess_safe = true;
}
}
@@ -457,13 +515,13 @@ static void add_ignores(struct objtool_file *file)
* But it at least allows objtool to understand the control flow *around* the
* retpoline.
*/
-static int add_nospec_ignores(struct objtool_file *file)
+static int add_ignore_alternatives(struct objtool_file *file)
{
struct section *sec;
struct rela *rela;
struct instruction *insn;
- sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
if (!sec)
return 0;
@@ -475,7 +533,7 @@ static int add_nospec_ignores(struct objtool_file *file)
insn = find_insn(file, rela->sym->sec, rela->addend);
if (!insn) {
- WARN("bad .discard.nospec entry");
+ WARN("bad .discard.ignore_alts entry");
return -1;
}
@@ -500,7 +558,7 @@ static int add_jump_destinations(struct objtool_file *file)
insn->type != INSN_JUMP_UNCONDITIONAL)
continue;
- if (insn->ignore)
+ if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
continue;
rela = find_rela_by_dest_range(insn->sec, insn->offset,
@@ -524,7 +582,8 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
} else {
/* sibling call */
- insn->jump_dest = 0;
+ insn->call_dest = rela->sym;
+ insn->jump_dest = NULL;
continue;
}
@@ -546,25 +605,38 @@ static int add_jump_destinations(struct objtool_file *file)
}
/*
- * For GCC 8+, create parent/child links for any cold
- * subfunctions. This is _mostly_ redundant with a similar
- * initialization in read_symbols().
- *
- * If a function has aliases, we want the *first* such function
- * in the symbol table to be the subfunction's parent. In that
- * case we overwrite the initialization done in read_symbols().
- *
- * However this code can't completely replace the
- * read_symbols() code because this doesn't detect the case
- * where the parent function's only reference to a subfunction
- * is through a switch table.
+ * Cross-function jump.
*/
if (insn->func && insn->jump_dest->func &&
- insn->func != insn->jump_dest->func &&
- !strstr(insn->func->name, ".cold.") &&
- strstr(insn->jump_dest->func->name, ".cold.")) {
- insn->func->cfunc = insn->jump_dest->func;
- insn->jump_dest->func->pfunc = insn->func;
+ insn->func != insn->jump_dest->func) {
+
+ /*
+ * For GCC 8+, create parent/child links for any cold
+ * subfunctions. This is _mostly_ redundant with a
+ * similar initialization in read_symbols().
+ *
+ * If a function has aliases, we want the *first* such
+ * function in the symbol table to be the subfunction's
+ * parent. In that case we overwrite the
+ * initialization done in read_symbols().
+ *
+ * However this code can't completely replace the
+ * read_symbols() code because this doesn't detect the
+ * case where the parent function's only reference to a
+ * subfunction is through a switch table.
+ */
+ if (!strstr(insn->func->name, ".cold.") &&
+ strstr(insn->jump_dest->func->name, ".cold.")) {
+ insn->func->cfunc = insn->jump_dest->func;
+ insn->jump_dest->func->pfunc = insn->func;
+
+ } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
+ insn->jump_dest->offset == insn->jump_dest->func->offset) {
+
+				/* sibling call */
+ insn->call_dest = insn->jump_dest->func;
+ insn->jump_dest = NULL;
+ }
}
}
@@ -633,9 +705,6 @@ static int add_call_destinations(struct objtool_file *file)
* conditionally jumps to the _end_ of the entry. We have to modify these
* jumps' destinations to point back to .text rather than the end of the
* entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- * which is a "very very small percentage of machines".
*/
static int handle_group_alt(struct objtool_file *file,
struct special_alt *special_alt,
@@ -651,9 +720,6 @@ static int handle_group_alt(struct objtool_file *file,
if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
break;
- if (special_alt->skip_orig)
- insn->type = INSN_NOP;
-
insn->alt_group = true;
last_orig_insn = insn;
}
@@ -669,10 +735,10 @@ static int handle_group_alt(struct objtool_file *file,
clear_insn_state(&fake_jump->state);
fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = -1;
+ fake_jump->offset = FAKE_JUMP_OFFSET;
fake_jump->type = INSN_JUMP_UNCONDITIONAL;
fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->ignore = true;
+ fake_jump->func = orig_insn->func;
}
if (!special_alt->new_len) {
@@ -695,6 +761,7 @@ static int handle_group_alt(struct objtool_file *file,
last_new_insn = insn;
insn->ignore = orig_insn->ignore_alts;
+ insn->func = orig_insn->func;
if (insn->type != INSN_JUMP_CONDITIONAL &&
insn->type != INSN_JUMP_UNCONDITIONAL)
@@ -817,6 +884,8 @@ static int add_special_section_alts(struct objtool_file *file)
}
alt->insn = new_insn;
+ alt->skip_orig = special_alt->skip_orig;
+ orig_insn->ignore_alts |= special_alt->skip_alt;
list_add_tail(&alt->list, &orig_insn->alts);
list_del(&special_alt->list);
@@ -1238,8 +1307,9 @@ static int decode_sections(struct objtool_file *file)
return ret;
add_ignores(file);
+ add_uaccess_safe(file);
- ret = add_nospec_ignores(file);
+ ret = add_ignore_alternatives(file);
if (ret)
return ret;
@@ -1319,11 +1389,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
return 0;
/* push */
- if (op->dest.type == OP_DEST_PUSH)
+ if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
cfa->offset += 8;
/* pop */
- if (op->src.type == OP_SRC_POP)
+ if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
cfa->offset -= 8;
/* add immediate to sp */
@@ -1580,6 +1650,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_SRC_POP:
+ case OP_SRC_POPF:
if (!state->drap && op->dest.type == OP_DEST_REG &&
op->dest.reg == cfa->base) {
@@ -1644,6 +1715,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_DEST_PUSH:
+ case OP_DEST_PUSHF:
state->stack_size += 8;
if (cfa->base == CFI_SP)
cfa->offset += 8;
@@ -1734,7 +1806,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
break;
case OP_DEST_MEM:
- if (op->src.type != OP_SRC_POP) {
+ if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
WARN_FUNC("unknown stack-related memory operation",
insn->sec, insn->offset);
return -1;
@@ -1798,6 +1870,50 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
return false;
}
+static inline bool func_uaccess_safe(struct symbol *func)
+{
+ if (func)
+ return func->alias->uaccess_safe;
+
+ return false;
+}
+
+static inline const char *insn_dest_name(struct instruction *insn)
+{
+ if (insn->call_dest)
+ return insn->call_dest->name;
+
+ return "{dynamic}";
+}
+
+static int validate_call(struct instruction *insn, struct insn_state *state)
+{
+ if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
+ WARN_FUNC("call to %s() with UACCESS enabled",
+ insn->sec, insn->offset, insn_dest_name(insn));
+ return 1;
+ }
+
+ if (state->df) {
+ WARN_FUNC("call to %s() with DF set",
+ insn->sec, insn->offset, insn_dest_name(insn));
+ return 1;
+ }
+
+ return 0;
+}
+
+static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+{
+ if (has_modified_stack_frame(state)) {
+ WARN_FUNC("sibling call from callable instruction with modified stack frame",
+ insn->sec, insn->offset);
+ return 1;
+ }
+
+ return validate_call(insn, state);
+}
+
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
@@ -1831,7 +1947,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 1;
}
- func = insn->func ? insn->func->pfunc : NULL;
+ if (insn->func)
+ func = insn->func->pfunc;
if (func && insn->ignore) {
WARN_FUNC("BUG: why am I validating an ignored function?",
@@ -1843,7 +1960,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
if (!insn->hint && !insn_state_match(insn, &state))
return 1;
- return 0;
+ /* If we were here with AC=0, but now have AC=1, go again */
+ if (insn->state.uaccess || !state.uaccess)
+ return 0;
}
if (insn->hint) {
@@ -1892,16 +2011,42 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
insn->visited = true;
if (!insn->ignore_alts) {
+ bool skip_orig = false;
+
list_for_each_entry(alt, &insn->alts, list) {
+ if (alt->skip_orig)
+ skip_orig = true;
+
ret = validate_branch(file, alt->insn, state);
- if (ret)
- return 1;
+ if (ret) {
+ if (backtrace)
+ BT_FUNC("(alt)", insn);
+ return ret;
+ }
}
+
+ if (skip_orig)
+ return 0;
}
switch (insn->type) {
case INSN_RETURN:
+ if (state.uaccess && !func_uaccess_safe(func)) {
+ WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
+ return 1;
+ }
+
+ if (!state.uaccess && func_uaccess_safe(func)) {
+ WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
+ return 1;
+ }
+
+ if (state.df) {
+ WARN_FUNC("return with DF set", sec, insn->offset);
+ return 1;
+ }
+
if (func && has_modified_stack_frame(&state)) {
WARN_FUNC("return with modified stack frame",
sec, insn->offset);
@@ -1917,17 +2062,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 0;
case INSN_CALL:
- if (is_fentry_call(insn))
- break;
+ case INSN_CALL_DYNAMIC:
+ ret = validate_call(insn, &state);
+ if (ret)
+ return ret;
- ret = dead_end_function(file, insn->call_dest);
- if (ret == 1)
- return 0;
- if (ret == -1)
- return 1;
+ if (insn->type == INSN_CALL) {
+ if (is_fentry_call(insn))
+ break;
+
+ ret = dead_end_function(file, insn->call_dest);
+ if (ret == 1)
+ return 0;
+ if (ret == -1)
+ return 1;
+ }
- /* fallthrough */
- case INSN_CALL_DYNAMIC:
if (!no_fp && func && !has_valid_stack_frame(&state)) {
WARN_FUNC("call without frame pointer save/setup",
sec, insn->offset);
@@ -1937,18 +2087,21 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
- if (insn->jump_dest &&
- (!func || !insn->jump_dest->func ||
- insn->jump_dest->func->pfunc == func)) {
- ret = validate_branch(file, insn->jump_dest,
- state);
+ if (func && !insn->jump_dest) {
+ ret = validate_sibling_call(insn, &state);
if (ret)
- return 1;
+ return ret;
- } else if (func && has_modified_stack_frame(&state)) {
- WARN_FUNC("sibling call from callable instruction with modified stack frame",
- sec, insn->offset);
- return 1;
+ } else if (insn->jump_dest &&
+ (!func || !insn->jump_dest->func ||
+ insn->jump_dest->func->pfunc == func)) {
+ ret = validate_branch(file, insn->jump_dest,
+ state);
+ if (ret) {
+ if (backtrace)
+ BT_FUNC("(branch)", insn);
+ return ret;
+ }
}
if (insn->type == INSN_JUMP_UNCONDITIONAL)
@@ -1957,11 +2110,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
break;
case INSN_JUMP_DYNAMIC:
- if (func && list_empty(&insn->alts) &&
- has_modified_stack_frame(&state)) {
- WARN_FUNC("sibling call from callable instruction with modified stack frame",
- sec, insn->offset);
- return 1;
+ if (func && list_empty(&insn->alts)) {
+ ret = validate_sibling_call(insn, &state);
+ if (ret)
+ return ret;
}
return 0;
@@ -1978,6 +2130,63 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
if (update_insn_state(insn, &state))
return 1;
+ if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
+ if (!state.uaccess_stack) {
+ state.uaccess_stack = 1;
+ } else if (state.uaccess_stack >> 31) {
+ WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
+ return 1;
+ }
+ state.uaccess_stack <<= 1;
+ state.uaccess_stack |= state.uaccess;
+ }
+
+ if (insn->stack_op.src.type == OP_SRC_POPF) {
+ if (state.uaccess_stack) {
+ state.uaccess = state.uaccess_stack & 1;
+ state.uaccess_stack >>= 1;
+ if (state.uaccess_stack == 1)
+ state.uaccess_stack = 0;
+ }
+ }
+
+ break;
+
+ case INSN_STAC:
+ if (state.uaccess) {
+ WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+ return 1;
+ }
+
+ state.uaccess = true;
+ break;
+
+ case INSN_CLAC:
+ if (!state.uaccess && insn->func) {
+ WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+ return 1;
+ }
+
+ if (func_uaccess_safe(func) && !state.uaccess_stack) {
+ WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+ return 1;
+ }
+
+ state.uaccess = false;
+ break;
+
+ case INSN_STD:
+ if (state.df)
+ WARN_FUNC("recursive STD", sec, insn->offset);
+
+ state.df = true;
+ break;
+
+ case INSN_CLD:
+ if (!state.df && insn->func)
+ WARN_FUNC("redundant CLD", sec, insn->offset);
+
+ state.df = false;
break;
default:
@@ -2014,6 +2223,8 @@ static int validate_unwind_hints(struct objtool_file *file)
for_each_insn(file, insn) {
if (insn->hint && !insn->visited) {
ret = validate_branch(file, insn, state);
+ if (ret && backtrace)
+ BT_FUNC("<=== (hint)", insn);
warnings += ret;
}
}
@@ -2141,7 +2352,11 @@ static int validate_functions(struct objtool_file *file)
if (!insn || insn->ignore)
continue;
+ state.uaccess = func->alias->uaccess_safe;
+
ret = validate_branch(file, insn, state);
+ if (ret && backtrace)
+ BT_FUNC("<=== (func)", insn);
warnings += ret;
}
}
@@ -2184,9 +2399,10 @@ static void cleanup(struct objtool_file *file)
elf_close(file->elf);
}
+static struct objtool_file file;
+
int check(const char *_objname, bool orc)
{
- struct objtool_file file;
int ret, warnings = 0;
objname = _objname;
@@ -2197,7 +2413,6 @@ int check(const char *_objname, bool orc)
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
- file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
file.c_file = find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable;
file.hints = false;
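
With the new flags wired into builtin-check.c, a hand-run invocation of the
checker on a single object file might look like the following (the object
path is illustrative; the kernel build normally invokes objtool itself):

	$ tools/objtool/objtool check --uaccess --backtrace kernel/exit.o
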
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index e6e8a655b556..cb60b9acf5cf 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _CHECK_H
@@ -31,7 +19,8 @@ struct insn_state {
int stack_size;
unsigned char type;
bool bp_scratch;
- bool drap, end;
+ bool drap, end, uaccess, df;
+ unsigned int uaccess_stack;
int drap_reg, drap_offset;
struct cfi_reg vals[CFI_NUM_REGS];
};
@@ -60,7 +49,6 @@ struct objtool_file {
struct elf *elf;
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 16);
- struct section *whitelist;
bool ignore_unreachables, c_file, hints, rodata;
};
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index b8f3cca8e58b..e99e1be19ad9 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* elf.c - ELF access library
*
* Adapted from kpatch (https://github.com/dynup/kpatch):
* Copyright (C) 2013-2015 Josh Poimboeuf <jpoimboe@redhat.com>
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
@@ -219,7 +207,7 @@ static int read_sections(struct elf *elf)
static int read_symbols(struct elf *elf)
{
struct section *symtab, *sec;
- struct symbol *sym, *pfunc;
+ struct symbol *sym, *pfunc, *alias;
struct list_head *entry, *tmp;
int symbols_nr, i;
char *coldstr;
@@ -239,6 +227,7 @@ static int read_symbols(struct elf *elf)
return -1;
}
memset(sym, 0, sizeof(*sym));
+ alias = sym;
sym->idx = i;
@@ -288,11 +277,17 @@ static int read_symbols(struct elf *elf)
break;
}
- if (sym->offset == s->offset && sym->len >= s->len) {
- entry = tmp;
- break;
+ if (sym->offset == s->offset) {
+ if (sym->len == s->len && alias == sym)
+ alias = s;
+
+ if (sym->len >= s->len) {
+ entry = tmp;
+ break;
+ }
}
}
+ sym->alias = alias;
list_add(&sym->list, entry);
hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
}
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index bc97ed86b9cd..e44ca5d51871 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _OBJTOOL_ELF_H
@@ -61,7 +49,8 @@ struct symbol {
unsigned char bind, type;
unsigned long offset;
unsigned int len;
- struct symbol *pfunc, *cfunc;
+ struct symbol *pfunc, *cfunc, *alias;
+ bool uaccess_safe;
};
struct rela {
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 07f329919828..0b3528f05053 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
index b0e92a6d0903..ee2832221e62 100644
--- a/tools/objtool/orc.h
+++ b/tools/objtool/orc.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ORC_H
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index faa444270ee3..13ccf775a83a 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <unistd.h>
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 3f98dcfbc177..27a4112848c2 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <stdlib.h>
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 50af4e1274b3..fdbaa611146d 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -23,6 +11,7 @@
#include <stdlib.h>
#include <string.h>
+#include "builtin.h"
#include "special.h"
#include "warn.h"
@@ -42,6 +31,7 @@
#define ALT_NEW_LEN_OFFSET 11
#define X86_FEATURE_POPCNT (4*32+23)
+#define X86_FEATURE_SMAP (9*32+20)
struct special_entry {
const char *sec;
@@ -110,6 +100,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
*/
if (feature == X86_FEATURE_POPCNT)
alt->skip_orig = true;
+
+ /*
+ * If UACCESS validation is enabled; force that alternative;
+ * otherwise force it the other way.
+ *
+ * What we want to avoid is having both the original and the
+ * alternative code flow at the same time, in that case we can
+ * find paths that see the STAC but take the NOP instead of
+ * CLAC and the other way around.
+ */
+ if (feature == X86_FEATURE_SMAP) {
+ if (uaccess)
+ alt->skip_orig = true;
+ else
+ alt->skip_alt = true;
+ }
}
orig_rela = find_rela_by_dest(sec, offset + entry->orig);
diff --git a/tools/objtool/special.h b/tools/objtool/special.h
index fad1d092f679..35061530e46e 100644
--- a/tools/objtool/special.h
+++ b/tools/objtool/special.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPECIAL_H
@@ -26,6 +14,7 @@ struct special_alt {
bool group;
bool skip_orig;
+ bool skip_alt;
bool jump_or_nop;
struct section *orig_sec;
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index afd9f7a05f6d..cbb0a02b7480 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -1,18 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _WARN_H
@@ -64,6 +52,14 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \
})
+#define BT_FUNC(format, insn, ...) \
+({ \
+ struct instruction *_insn = (insn); \
+ char *_str = offstr(_insn->sec, _insn->offset); \
+ WARN(" %s: " format, _str, ##__VA_ARGS__); \
+ free(_str); \
+})
+
#define WARN_ELF(format, ...) \
WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
index 46e4c2f318c9..6876ee4bd78c 100644
--- a/tools/pci/Makefile
+++ b/tools/pci/Makefile
@@ -14,9 +14,12 @@ MAKEFLAGS += -r
CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
-ALL_TARGETS := pcitest pcitest.sh
+ALL_TARGETS := pcitest
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+SCRIPTS := pcitest.sh
+ALL_SCRIPTS := $(patsubst %,$(OUTPUT)%,$(SCRIPTS))
+
all: $(ALL_PROGRAMS)
export srctree OUTPUT CC LD CFLAGS
@@ -44,8 +47,11 @@ clean:
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
- for program in $(ALL_PROGRAMS); do \
+ for program in $(ALL_PROGRAMS) pcitest.sh; do \
install $$program $(DESTDIR)$(bindir); \
+ done; \
+ for script in $(ALL_SCRIPTS); do \
+ install $$script $(DESTDIR)$(bindir); \
done
FORCE:
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index ec4d51f3308b..cb7a47dfd8b6 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/**
* Userspace PCI Endpoint Test Module
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <errno.h>
@@ -140,6 +129,7 @@ static void run_test(struct pci_test *test)
}
fflush(stdout);
+ return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */
}
int main(int argc, char **argv)
@@ -162,7 +152,7 @@ int main(int argc, char **argv)
/* set default endpoint device */
test->device = "/dev/pci-endpoint-test.0";
- while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF)
+ while ((c = getopt(argc, argv, "D:b:m:x:i:Ilhrwcs:")) != EOF)
switch (c) {
case 'D':
test->device = optarg;
@@ -206,7 +196,6 @@ int main(int argc, char **argv)
case 's':
test->size = strtoul(optarg, NULL, 0);
continue;
- case '?':
case 'h':
default:
usage:
@@ -224,10 +213,10 @@ usage:
"\t-w Write buffer test\n"
"\t-c Copy buffer test\n"
"\t-s <size> Size of buffer {default: 100KB}\n",
+ "\t-h Print this help message\n",
argv[0]);
return -EINVAL;
}
- run_test(test);
- return 0;
+ return run_test(test);
}
diff --git a/tools/pcmcia/crc32hash.c b/tools/pcmcia/crc32hash.c
index 44f8beea7260..1a18da9cb6a1 100644
--- a/tools/pcmcia/crc32hash.c
+++ b/tools/pcmcia/crc32hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* crc32hash.c - derived from linux/lib/crc32.c, GNU GPL v2 */
/* Usage example:
$ ./crc32hash "Dual Speed"
diff --git a/tools/perf/Build b/tools/perf/Build
index e5232d567611..5f392dbb88fc 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -46,10 +46,10 @@ CFLAGS_builtin-trace.o += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_
CFLAGS_builtin-report.o += -DTIPDIR="BUILD_STR($(tipdir_SQ))"
CFLAGS_builtin-report.o += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
-libperf-y += util/
-libperf-y += arch/
-libperf-y += ui/
-libperf-y += scripts/
-libperf-$(CONFIG_TRACE) += trace/beauty/
+perf-y += util/
+perf-y += arch/
+perf-y += ui/
+perf-y += scripts/
+perf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index f6fc6507ba55..3766886c4bca 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
NOTE this description is omitting other libraries involved, only
focusing on build framework outcomes
+
+3) Build with ASan or UBSan
+==========================
+ $ cd tools/perf
+ $ make DESTDIR=/usr
+ $ make DESTDIR=/usr install
+
+AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
+such as buffer overflows and memory leaks.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
+ $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
+
+ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
+
+UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
+supported by GCC. UBSan detects undefined behaviors of programs at runtime.
+
+ $ cd tools/perf
+ $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
+ $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
+
+If UBSan detects any problem at runtime, it outputs a “runtime error:” message.
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index ac841bc5c35b..6d148a40551c 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include
include ../../scripts/utilities.mak
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index 115eaacc455f..60d99e5e7921 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -88,16 +88,16 @@ smaller.
To represent software control flow, "branches" samples are produced. By default
a branch sample is synthesized for every single branch. To get an idea what
-data is available you can use the 'perf script' tool with no parameters, which
-will list all the samples.
+data is available you can use the 'perf script' tool with all itrace sampling
+options, which will list all the samples.
perf record -e intel_pt//u ls
- perf script
+ perf script --itrace=ibxwpe
An interesting field that is not printed by default is 'flags' which can be
displayed as follows:
- perf script -Fcomm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr,symoff,flags
+ perf script --itrace=ibxwpe -F+flags
The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
@@ -713,7 +713,7 @@ Having no option is the same as
which, in turn, is the same as
- --itrace=ibxwpe
+ --itrace=cepwx
The letters are:
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 4ac7775fbc11..462b3cde0675 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -114,12 +114,16 @@ Given a $HOME/.perfconfig like this:
[report]
# Defaults
- sort-order = comm,dso,symbol
+ sort_order = comm,dso,symbol
percent-limit = 0
queue-size = 0
children = true
group = true
+ [llvm]
+ dump-obj = true
+ clang-opt = -g
+
You can hide source code of annotate feature setting the config to false with
% perf config annotate.hide_src_code=true
@@ -553,6 +557,47 @@ trace.*::
trace.show_zeros::
Do not suppress syscall arguments that are equal to zero.
+llvm.*::
+ llvm.clang-path::
+ Path to clang. If omitted, it is searched for in $PATH.
+
+ llvm.clang-bpf-cmd-template::
+ Cmdline template. The lines below show its default value. Environment
+ variables are used to pass options.
+ "$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS $KERNEL_INC_OPTIONS \
+ -Wno-unused-value -Wno-pointer-sign -working-directory \
+ $WORKING_DIR -c $CLANG_SOURCE -target bpf -O2 -o -"
+
+ llvm.clang-opt::
+ Options passed to clang.
+
+ llvm.kbuild-dir::
+ kbuild directory. If not set, use /lib/modules/`uname -r`/build.
+ If deliberately set to "", skip kernel header auto-detection.
+
+ llvm.kbuild-opts::
+ Options passed to 'make' when detecting kernel header options.
+
+ llvm.dump-obj::
+ Enable dumping of BPF object files compiled by LLVM.
+
+ llvm.opts::
+ Options passed to llc.
+
+samples.*::
+
+ samples.context::
+ Define how many ns worth of time to show
+ around samples in perf report sample context browser.
+
+scripts.*::
+
+ Any option defines a script that is added to the scripts menu
+ in the interactive perf browser and whose output is displayed.
+ The name of the option is the menu entry name; the value is the script
+ command line. The script is passed the same options as a full perf script
+ run, in particular -i (the perf.data file), --cpu and --tid.
+
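For illustration, a hypothetical entry (the script name and command below are
assumed, not part of this patch) could look like:

	[scripts]
		syscall-summary = perf script -s syscall-counts.py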
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index a79c84ae61aa..da7809b15cc9 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -118,6 +118,62 @@ OPTIONS
sum of shown entries will be always 100%. "absolute" means it retains
the original value before and after the filter is applied.
+--time::
+ Analyze samples within given time window. It supports time
+ percent with multiple time ranges. Time string is 'a%/n,b%/m,...'
+ or 'a%-b%,c%-%d,...'.
+
+ For example:
+
+ Select the second 10% time slice to diff:
+
+ perf diff --time 10%/2
+
+ Select from 0% to 10% time slice to diff:
+
+ perf diff --time 0%-10%
+
+ Select the first and the second 10% time slices to diff:
+
+ perf diff --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices to diff:
+
+ perf diff --time 0%-10%,30%-40%
+
+ It also supports analyzing samples within a given time window
+ <start>,<stop>. Times have the format seconds.microseconds. If 'start'
+ is not given (i.e., time string is ',x.y') then analysis starts at
+ the beginning of the file. If stop time is not given (i.e., time
+ string is 'x.y,') then analysis goes to the end of the file. Time string is
+ 'a1.b1,c1.d1:a2.b2,c2.d2'. Use ':' to separate timestamps for different
+ perf.data files.
+
+ For example, we get the timestamp information from 'perf script'.
+
+ perf script -i perf.data.old
+ mgen 13940 [000] 3946.361400: ...
+
+ perf script -i perf.data
+ mgen 13940 [000] 3971.150589 ...
+
+ perf diff --time 3946.361400,:3971.150589,
+
+ It analyzes the perf.data.old from the timestamp 3946.361400 to
+ the end of perf.data.old and analyzes the perf.data from the
+ timestamp 3971.150589 to the end of perf.data.
+
+--cpu:: Only diff samples for the list of CPUs provided. Multiple CPUs can
+ be provided as a comma-separated list with no space: 0,1. Ranges of
+ CPUs are specified with -: 0-2. Default is to report samples on all
+ CPUs.
+
+--pid=::
+ Only diff samples for given process ID (comma separated list).
+
+--tid=::
+ Only diff samples for given thread ID (comma separated list).
+
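For example (an illustrative combination of the options above, not taken from
this patch), to diff only samples from CPUs 0-2 within the first 10% time
slice of each data file:

	perf diff --time 10%/1 --cpu 0-2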
COMPARISON
----------
The comparison is governed by the baseline file. The baseline perf.data
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 138fb6e94b3c..18ed1b0fceb3 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -199,6 +199,18 @@ also be supplied. For example:
perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
+EVENT QUALIFIERS:
+
+It is also possible to add extra qualifiers to an event:
+
+percore:
+
+Sums up the event counts for all hardware threads in a core, e.g.:
+
+
+ perf stat -e cpu/event=0,umask=0x3,percore=1/
+
+
EVENT GROUPS
------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..de269430720a 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -88,6 +88,20 @@ OPTIONS
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
+ - a BPF source file (ending in .c) or a precompiled object file (ending
+ in .o) selects one or more BPF events.
+ The BPF program can attach to various perf events based on the ELF section
+ names.
+
+ When processing a '.c' file, perf searches for an installed LLVM to compile it
+ into an object file first. Optional clang options can be passed via the
+ '--clang-opt' command line option, e.g.:
+
+ perf record --clang-opt "-DLINUX_VERSION_CODE=0x50000" \
+ -e tests/bpf-script-example.c
+
+ Note: '--clang-opt' must be placed before '--event/-e'.
+
- a group of events surrounded by a pair of brace ("{event1,event2,...}").
Each event is separated by commas and the group should be quoted to
prevent the shell interpretation. You also need to use --group on
@@ -392,7 +406,8 @@ symbolic names, e.g. on x86, ax, si. To list the available registers use
--intr-regs=ax,bx. The list of register is architecture dependent.
--user-regs::
-Capture user registers at sample time. Same arguments as -I.
+Similar to -I, but capture user registers at sample time. To list the available
+user registers use --user-regs=\?.
--running-time::
Record running and enabled time for read events (:S)
@@ -440,6 +455,35 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
+--mmap-flush=number::
+
+Specify the minimal number of bytes that is extracted from mmap data pages and
+processed for output. The number can be specified using B/K/M/G suffixes.
+
+The maximum allowed value is a quarter of the size of the mmaped data pages.
+
+The default value is 1 byte, which means that every time the output writing
+thread finds new data in the mmaped buffer, the data is extracted, possibly
+compressed (-z) and written to the output, perf.data or pipe.
+
+Larger data chunks are compressed more effectively in comparison to smaller
+chunks so extraction of larger chunks from the mmap data pages is preferable
+from the perspective of output size reduction.
+
+Also, in some cases executing fewer output write syscalls with bigger data sizes
+can take less time than executing more output write syscalls with smaller data
+sizes, thus lowering runtime profiling overhead.
+
+-z::
+--compression-level[=n]::
+Produce compressed trace using specified level n (default: 1 - fastest compression,
+22 - smallest trace)
+
--all-kernel::
Configure all used events to run in kernel space.
@@ -476,6 +520,10 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
+--switch-max-files=N::
+
+When rotating perf.data with --switch-output, only keep N files.
+
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
options.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 1a27bfe05039..f441baa794ce 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -105,6 +105,8 @@ OPTIONS
guest machine
- sample: Number of sample
- period: Raw number of event count of sample
+ - time: Separate the samples by time stamp with the resolution specified by
+ --time-quantum (default 100ms). Specify it together with the overhead key,
+ and before it, e.g. --sort time,overhead.
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -459,6 +461,10 @@ include::itrace.txt[]
--socket-filter::
Only report the samples on the processor socket that match with this filter
+--samples=N::
+ Save N individual samples for each histogram entry to show context in perf
+ report tui browser.
+
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
@@ -477,6 +483,9 @@ include::itrace.txt[]
Please note that not all mmaps are stored, options affecting which ones
are include 'perf record --data', for instance.
+--ns::
+ Show time stamps in nanoseconds.
+
--stats::
Display overall events statistics without any further processing.
(like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
The period/hits keywords set the base the percentage is computed
on - the samples period or the number of samples (hits).
+--time-quantum::
+ Configure time quantum for time sort key. Default 100ms.
+ Accepts s, us, ms, ns units.
+
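For example (an illustrative invocation, not from this patch), to bucket
samples into 1 second slices when sorting by time:

	perf report --sort time,overhead,sym --time-quantum 1s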
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 9e4def08d569..9b0d04dd2a61 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -159,6 +159,12 @@ OPTIONS
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
+ It's possible to add/remove fields only for a specific event type:
+
+ -Fsw:-cpu,-period
+
+ removes cpu and period from software events.
+
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
@@ -374,6 +380,9 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
+--reltime::
+ Print time stamps relative to trace start.
+
--per-event-dump::
Create per event files with a "perf.data.EVENT.dump" name instead of
printing to stdout, useful, for instance, for generating flamegraphs.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4bc2085e5197..1e312c2672e4 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -43,6 +43,10 @@ report::
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*
+ 'percore' is an event qualifier that sums up the event counts for both
+ hardware threads in a core. For example:
+ perf stat -A -a -e cpu/event,percore=1/,otherevent ...
+
- a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
where M, N, K are numbers (in decimal, hex, octal format).
Acceptable values for each of 'config', 'config1' and 'config2'
@@ -72,9 +76,8 @@ report::
--all-cpus::
system-wide collection from all CPUs (default if no target is specified)
--c::
---scale::
- scale/normalize counter values
+--no-scale::
+ Don't scale/normalize counter values
-d::
--detailed::
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 631e687be4eb..fc6e43262c41 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -210,6 +210,14 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
may happen, for instance, when a thread gets migrated to a different CPU
while processing a syscall.
+--map-dump::
+ Dump BPF maps set up by events passed via -e, for instance the augmented_raw_syscalls
+ living in tools/perf/examples/bpf/augmented_raw_syscalls.c. For now this
+ dumps just boolean map values and integer keys; in time this will print in hex
+ by default and use BTF when available, as well as use functions to do pretty
+ printing using the existing 'perf trace' syscall arg beautifiers to map integer
+ arguments to strings (pid to comm, syscall id to syscall name, etc.).
+
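An illustrative invocation (assembled from the description above, not spelled
out in this patch):

	perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c --map-dump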
PAGEFAULTS
----------
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index dfb218feaad9..6967e9b02be5 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -43,11 +43,10 @@ struct perf_file_section {
Flags section:
-The header is followed by different optional headers, described by the bits set
-in flags. Only headers for which the bit is set are included. Each header
-consists of a perf_file_section located after the initial header.
-The respective perf_file_section points to the data of the additional
-header and defines its size.
+For each of the optional features a perf_file_section is placed after the data
+section if the feature bit is set in the perf_header flags bitset. The
+respective perf_file_section points to the data of the additional header and
+defines its size.
Some headers consist of strings, which are defined like this:
@@ -131,7 +130,7 @@ An uint64_t with the total memory in bytes.
HEADER_CMDLINE = 11,
-A perf_header_string with the perf command line used to collect the data.
+A perf_header_string_list with the perf arg-vector used to collect the data.
HEADER_EVENT_DESC = 12,
@@ -273,6 +272,19 @@ struct {
Two uint64_t for the time of first sample and the time of last sample.
+ HEADER_COMPRESSED = 27,
+
+struct {
+ u32 version;
+ u32 type;
+ u32 level;
+ u32 ratio;
+ u32 mmap_len;
+};
+
+Indicates that the trace contains records of PERF_RECORD_COMPRESSED type
+that hold perf event records in compressed form.
+
other bits are reserved and should be ignored for now
HEADER_FEAT_BITS = 256,
@@ -438,6 +450,17 @@ struct auxtrace_error_event {
Describes a header feature. These are records used in pipe-mode that
contain information that otherwise would be in perf.data file's header.
+ PERF_RECORD_COMPRESSED = 81,
+
+struct compressed_event {
+ struct perf_event_header header;
+ char data[];
+};
+
+The header is followed by a compressed data frame that can be decompressed
+into an array of perf trace records. The size of the entire compressed event
+record including the header is limited by the max value of header.size.
+
Event types
Define the event attributes with their IDs.
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 864e37597252..401f0ed67439 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -22,6 +22,8 @@ OPTIONS
verbose - general debug messages
ordered-events - ordered events object debug messages
data-convert - data convert command debug messages
+ stderr - write debug output (option -v) to stderr
+ in browser mode
--buildid-dir::
Setup buildid cache directory. It has higher priority than
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 849599f39c5e..869965d629ce 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
Show individual samples with: perf script
Limit to show entries above 5% only: perf report --percent-limit 5
Profiling branch (mis)predictions with: perf record -b / perf report
+To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
Treat branches as callchains: perf report --branch-history
To count events in every 1000 msec: perf stat -I 1000
Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
To report cacheline events from previous recording: perf c2c report
+To browse sample contexts use perf report --sample 10 and select in context menu
+To separate samples by time use perf report --sort time,overhead,sym
+To set sample time separation other than 100ms with --sort time use --time-quantum
+Add -I to perf record to sample register values visible in perf report context.
+To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
+To show context switches in perf report sample context add --switch-events to perf record.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b441c88cafa1..85fbcd265351 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifeq ($(src-perf),)
src-perf := $(srctree)/tools/perf
@@ -59,6 +60,10 @@ ifeq ($(SRCARCH),arm64)
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
+ifeq ($(SRCARCH),csky)
+ NO_PERF_REGS := 0
+endif
+
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
NO_SYSCALL_TABLE := 0
@@ -77,7 +82,7 @@ endif
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
-ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390 csky))
NO_LIBDW_DWARF_UNWIND := 1
endif
@@ -109,6 +114,13 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
+FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
+FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
+FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
+
+FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
+
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
@@ -145,6 +157,13 @@ endif
FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
+ifdef LIBZSTD_DIR
+ LIBZSTD_CFLAGS := -I$(LIBZSTD_DIR)/lib
+ LIBZSTD_LDFLAGS := -L$(LIBZSTD_DIR)/lib
+endif
+FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS)
+
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
-include $(src-perf)/arch/$(SRCARCH)/Makefile
@@ -218,6 +237,10 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_LDFLAGS-libaio = -lrt
+
+FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
+
CFLAGS += -fno-omit-frame-pointer
CFLAGS += -ggdb3
CFLAGS += -funwind-tables
@@ -386,7 +409,8 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
-ifndef NO_CORESIGHT
+ifdef CORESIGHT
+ $(call feature_check,libopencsd)
ifeq ($(feature-libopencsd), 1)
CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
LDFLAGS += $(LIBOPENCSD_LDFLAGS)
@@ -482,6 +506,7 @@ endif
ifndef NO_LIBUNWIND
have_libunwind :=
+ $(call feature_check,libunwind-x86)
ifeq ($(feature-libunwind-x86), 1)
$(call detected,CONFIG_LIBUNWIND_X86)
CFLAGS += -DHAVE_LIBUNWIND_X86_SUPPORT
@@ -490,6 +515,7 @@ ifndef NO_LIBUNWIND
have_libunwind = 1
endif
+ $(call feature_check,libunwind-aarch64)
ifeq ($(feature-libunwind-aarch64), 1)
$(call detected,CONFIG_LIBUNWIND_AARCH64)
CFLAGS += -DHAVE_LIBUNWIND_AARCH64_SUPPORT
@@ -701,7 +727,7 @@ else
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd
+ EXTLIBS += -lbfd -lopcodes
else
# we are on a system that requires -liberty and (maybe) -lz
# to link against -lbfd; test each case individually here
@@ -712,12 +738,15 @@ else
$(call feature_check,libbfd-liberty-z)
ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -liberty
+ EXTLIBS += -lbfd -lopcodes -liberty
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
else
ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -liberty -lz
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
+ FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
endif
endif
+ $(call feature_check,disassembler-four-args)
endif
ifdef NO_DEMANGLE
@@ -770,6 +799,19 @@ ifndef NO_LZMA
endif
endif
+ifndef NO_LIBZSTD
+ ifeq ($(feature-libzstd), 1)
+ CFLAGS += -DHAVE_ZSTD_SUPPORT
+ CFLAGS += $(LIBZSTD_CFLAGS)
+ LDFLAGS += $(LIBZSTD_LDFLAGS)
+ EXTLIBS += -lzstd
+ $(call detected,CONFIG_ZSTD)
+ else
+ msg := $(warning No libzstd found, disables trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR);
+ NO_LIBZSTD := 1
+ endif
+endif
+
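# Illustrative use (the path is hypothetical, not from this patch): a libzstd
# installed outside the default search paths can be selected at build time,
# e.g. "make -C tools/perf LIBZSTD_DIR=/opt/zstd".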
ifndef NO_BACKTRACE
ifeq ($(feature-backtrace), 1)
CFLAGS += -DHAVE_BACKTRACE_SUPPORT
@@ -796,6 +838,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
CFLAGS += -DHAVE_KVM_STAT_SUPPORT
endif
+ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 0ee6795d82cc..4d46ca6d7e20 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
include ../scripts/Makefile.include
include ../scripts/Makefile.arch
@@ -102,12 +103,15 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
#
-# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
#
# Define NO_AIO if you do not want support of Posix AIO based trace
# streaming for record mode. Currently Posix AIO trace streaming is
# supported only when linking with glibc.
#
+# Define NO_LIBZSTD if you do not want support of Zstandard based runtime
+# trace compression in record mode.
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -344,9 +348,9 @@ endif
export PERL_PATH
-LIB_FILE=$(OUTPUT)libperf.a
+LIBPERF_A=$(OUTPUT)libperf.a
-PERFLIBS = $(LIB_FILE) $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
+PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD)
ifndef NO_LIBBPF
PERFLIBS += $(LIBBPF)
endif
@@ -481,8 +485,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
- $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
+ $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
mount_flags_array := $(beauty_outdir)/mount_flags_array.c
mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
@@ -549,6 +553,8 @@ JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
+LIBPERF_IN := $(OUTPUT)libperf-in.o
+
export JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
@@ -565,9 +571,12 @@ $(JEVENTS): $(JEVENTS_IN)
$(PMU_EVENTS_IN): $(JEVENTS) FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+$(LIBPERF_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=libperf
+
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
- $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
+ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBPERF_IN) $(LIBS) -o $@
$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk
@@ -683,12 +692,7 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBPERF_IN := $(OUTPUT)libperf-in.o
-
-$(LIBPERF_IN): prepare FORCE
- $(Q)$(MAKE) $(build)=libperf
-
-$(LIB_FILE): $(LIBPERF_IN)
+$(LIBPERF_A): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
@@ -863,8 +867,8 @@ ifndef NO_LIBPYTHON
$(call QUIET_INSTALL, python-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \
- $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
+ $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
$(call QUIET_INSTALL, perf_completion-script) \
@@ -910,7 +914,7 @@ python-clean:
$(python-clean)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index d9b6af837c7d..688818844c11 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
-libperf-y += common.o
-libperf-y += $(SRCARCH)/
+perf-y += common.o
+perf-y += $(SRCARCH)/
diff --git a/tools/perf/arch/arm/Build b/tools/perf/arch/arm/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm/Build
+++ b/tools/perf/arch/arm/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm/Makefile b/tools/perf/arch/arm/Makefile
index 18b13518d8d8..1d88fdab13bf 100644
--- a/tools/perf/arch/arm/Makefile
+++ b/tools/perf/arch/arm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index d9ae2733f9cc..bc8e97380c82 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,5 +1,5 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
-libperf-y += vectors-page.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
+perf-y += vectors-page.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 9a0242e74cfc..2c35e532bc9a 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm/util/Build b/tools/perf/arch/arm/util/Build
index e64c5f216448..296f0eac5e18 100644
--- a/tools/perf/arch/arm/util/Build
+++ b/tools/perf/arch/arm/util/Build
@@ -1,6 +1,6 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
+perf-$(CONFIG_AUXTRACE) += pmu.o auxtrace.o cs-etm.o
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2f595cd73da6..911426721170 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -5,6 +5,7 @@
*/
#include <api/fs/fs.h>
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
@@ -22,12 +23,10 @@
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
+#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>
-#define ENABLE_SINK_MAX 128
-#define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
-
struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
@@ -60,10 +59,48 @@ static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
return 0;
}
+static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
+ struct perf_evsel *evsel)
+{
+ char msg[BUFSIZ], path[PATH_MAX], *sink;
+ struct perf_evsel_config_term *term;
+ int ret = -EINVAL;
+ u32 hash;
+
+ if (evsel->attr.config2 & GENMASK(31, 0))
+ return 0;
+
+ list_for_each_entry(term, &evsel->config_terms, list) {
+ if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ continue;
+
+ sink = term->val.drv_cfg;
+ snprintf(path, PATH_MAX, "sinks/%s", sink);
+
+ ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
+ if (ret != 1) {
+ pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
+ sink, perf_evsel__name(evsel), errno,
+ str_error_r(errno, msg, sizeof(msg)));
+ return ret;
+ }
+
+ evsel->attr.config2 |= hash;
+ return 0;
+ }
+
+ /*
+ * No sink was provided on the command line - for _now_ treat
+ * this as an error.
+ */
+ return ret;
+}
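/*
 * Illustrative usage (the sink name below is hypothetical, not from this
 * patch): a sink is selected per event on the command line, e.g.
 *
 *   perf record -e cs_etm/@tmc_etr0/u -- ls
 *
 * cs_etm_set_sink_attr() then resolves "tmc_etr0" through the PMU's sinks/
 * sysfs directory and stores the resulting hash in attr.config2.
 */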
+
static int cs_etm_recording_options(struct auxtrace_record *itr,
struct perf_evlist *evlist,
struct record_opts *opts)
{
+ int ret;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -92,6 +129,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (!cs_etm_evsel)
return 0;
+ ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
+ if (ret)
+ return ret;
+
if (opts->use_clockid) {
pr_err("Cannot use clockid (-k option) with %s\n",
CORESIGHT_ETM_PMU_NAME);
@@ -598,54 +639,3 @@ struct auxtrace_record *cs_etm_record_init(int *err)
out:
return NULL;
}
-
-static FILE *cs_device__open_file(const char *name)
-{
- struct stat st;
- char path[PATH_MAX];
- const char *sysfs;
-
- sysfs = sysfs__mountpoint();
- if (!sysfs)
- return NULL;
-
- snprintf(path, PATH_MAX,
- "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
-
- if (stat(path, &st) < 0)
- return NULL;
-
- return fopen(path, "w");
-
-}
-
-static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
-{
- va_list args;
- FILE *file;
- int ret = -EINVAL;
-
- va_start(args, fmt);
- file = cs_device__open_file(name);
- if (file) {
- ret = vfprintf(file, fmt, args);
- fclose(file);
- }
- va_end(args);
- return ret;
-}
-
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
-{
- int ret;
- char enable_sink[ENABLE_SINK_MAX];
-
- snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
- term->val.drv_cfg, "enable_sink");
-
- ret = cs_device__print_file(enable_sink, "%d", 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h
index 1a12e64f5127..a3354bda4fe8 100644
--- a/tools/perf/arch/arm/util/cs-etm.h
+++ b/tools/perf/arch/arm/util/cs-etm.h
@@ -7,9 +7,6 @@
#ifndef INCLUDE__PERF_CS_ETM_H__
#define INCLUDE__PERF_CS_ETM_H__
-#include "../../util/evsel.h"
-
struct auxtrace_record *cs_etm_record_init(int *err);
-int cs_etm_set_drv_config(struct perf_evsel_config_term *term);
#endif
diff --git a/tools/perf/arch/arm/util/dwarf-regs.c b/tools/perf/arch/arm/util/dwarf-regs.c
index 8bb176a37990..fc5f71c91802 100644
--- a/tools/perf/arch/arm/util/dwarf-regs.c
+++ b/tools/perf/arch/arm/util/dwarf-regs.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Will Deacon, ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <stddef.h>
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index e047571e6080..bbc297a7e2e3 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <linux/coresight-pmu.h>
#include <linux/perf_event.h>
+#include <linux/string.h>
-#include "cs-etm.h"
#include "arm-spe.h"
#include "../../util/pmu.h"
@@ -19,7 +19,6 @@ struct perf_event_attr
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
- pmu->set_drv_config = cs_etm_set_drv_config;
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
diff --git a/tools/perf/arch/arm64/Build b/tools/perf/arch/arm64/Build
index 41bf61da476a..36222e64bbf7 100644
--- a/tools/perf/arch/arm64/Build
+++ b/tools/perf/arch/arm64/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-$(CONFIG_DWARF_UNWIND) += tests/
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 76c6345a57d5..8f70a1b282df 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -58,7 +58,7 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops arm64_mov_ops = {
.parse = arm64_mov__parse,
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
index c88fd32563eb..459469b7222c 100755
--- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -56,7 +56,7 @@ create_table()
echo "};"
}
-$gcc -E -dM -x c $input \
+$gcc -E -dM -x c -I $incpath/include/uapi $input \
|sed -ne 's/^#define __NR_//p' \
|sort -t' ' -k2 -nu \
|create_table
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
index 883c57ff0c08..41707fea74b3 100644
--- a/tools/perf/arch/arm64/tests/Build
+++ b/tools/perf/arch/arm64/tests/Build
@@ -1,4 +1,4 @@
-libperf-y += regs_load.o
-libperf-y += dwarf-unwind.o
+perf-y += regs_load.o
+perf-y += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index 5522ce384723..a6a407fa1b8b 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 68f8a8eb3ad0..3cde540d2fcf 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,10 +1,10 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
+perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
arm-spe.o
diff --git a/tools/perf/arch/arm64/util/dwarf-regs.c b/tools/perf/arch/arm64/util/dwarf-regs.c
index cd764a9fd098..b047b882c5b1 100644
--- a/tools/perf/arch/arm64/util/dwarf-regs.c
+++ b/tools/perf/arch/arm64/util/dwarf-regs.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Will Deacon, ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <errno.h>
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
index 0051b1ee8450..27fcf24d6850 100644
--- a/tools/perf/arch/arm64/util/sym-handling.c
+++ b/tools/perf/arch/arm64/util/sym-handling.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright (C) 2015 Naveen N. Rao, IBM Corporation
*/
diff --git a/tools/perf/arch/csky/Build b/tools/perf/arch/csky/Build
new file mode 100644
index 000000000000..e4e5f33c84d8
--- /dev/null
+++ b/tools/perf/arch/csky/Build
@@ -0,0 +1 @@
+perf-y += util/
diff --git a/tools/perf/arch/csky/Makefile b/tools/perf/arch/csky/Makefile
new file mode 100644
index 000000000000..88c08eed9c7b
--- /dev/null
+++ b/tools/perf/arch/csky/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
diff --git a/tools/perf/arch/csky/include/perf_regs.h b/tools/perf/arch/csky/include/perf_regs.h
new file mode 100644
index 000000000000..8f336ea1161a
--- /dev/null
+++ b/tools/perf/arch/csky/include/perf_regs.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include <linux/types.h>
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_CSKY_MAX) - 1)
+#define PERF_REGS_MAX PERF_REG_CSKY_MAX
+#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
+
+#define PERF_REG_IP PERF_REG_CSKY_PC
+#define PERF_REG_SP PERF_REG_CSKY_SP
+
+static inline const char *perf_reg_name(int id)
+{
+ switch (id) {
+ case PERF_REG_CSKY_A0:
+ return "a0";
+ case PERF_REG_CSKY_A1:
+ return "a1";
+ case PERF_REG_CSKY_A2:
+ return "a2";
+ case PERF_REG_CSKY_A3:
+ return "a3";
+ case PERF_REG_CSKY_REGS0:
+ return "regs0";
+ case PERF_REG_CSKY_REGS1:
+ return "regs1";
+ case PERF_REG_CSKY_REGS2:
+ return "regs2";
+ case PERF_REG_CSKY_REGS3:
+ return "regs3";
+ case PERF_REG_CSKY_REGS4:
+ return "regs4";
+ case PERF_REG_CSKY_REGS5:
+ return "regs5";
+ case PERF_REG_CSKY_REGS6:
+ return "regs6";
+ case PERF_REG_CSKY_REGS7:
+ return "regs7";
+ case PERF_REG_CSKY_REGS8:
+ return "regs8";
+ case PERF_REG_CSKY_REGS9:
+ return "regs9";
+ case PERF_REG_CSKY_SP:
+ return "sp";
+ case PERF_REG_CSKY_LR:
+ return "lr";
+ case PERF_REG_CSKY_PC:
+ return "pc";
+#if defined(__CSKYABIV2__)
+ case PERF_REG_CSKY_EXREGS0:
+ return "exregs0";
+ case PERF_REG_CSKY_EXREGS1:
+ return "exregs1";
+ case PERF_REG_CSKY_EXREGS2:
+ return "exregs2";
+ case PERF_REG_CSKY_EXREGS3:
+ return "exregs3";
+ case PERF_REG_CSKY_EXREGS4:
+ return "exregs4";
+ case PERF_REG_CSKY_EXREGS5:
+ return "exregs5";
+ case PERF_REG_CSKY_EXREGS6:
+ return "exregs6";
+ case PERF_REG_CSKY_EXREGS7:
+ return "exregs7";
+ case PERF_REG_CSKY_EXREGS8:
+ return "exregs8";
+ case PERF_REG_CSKY_EXREGS9:
+ return "exregs9";
+ case PERF_REG_CSKY_EXREGS10:
+ return "exregs10";
+ case PERF_REG_CSKY_EXREGS11:
+ return "exregs11";
+ case PERF_REG_CSKY_EXREGS12:
+ return "exregs12";
+ case PERF_REG_CSKY_EXREGS13:
+ return "exregs13";
+ case PERF_REG_CSKY_EXREGS14:
+ return "exregs14";
+ case PERF_REG_CSKY_TLS:
+ return "tls";
+ case PERF_REG_CSKY_HI:
+ return "hi";
+ case PERF_REG_CSKY_LO:
+ return "lo";
+#endif
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/csky/util/Build b/tools/perf/arch/csky/util/Build
new file mode 100644
index 000000000000..1160bb2332ba
--- /dev/null
+++ b/tools/perf/arch/csky/util/Build
@@ -0,0 +1,2 @@
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/csky/util/dwarf-regs.c b/tools/perf/arch/csky/util/dwarf-regs.c
new file mode 100644
index 000000000000..ca86ecaeacbb
--- /dev/null
+++ b/tools/perf/arch/csky/util/dwarf-regs.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+// Mapping of DWARF debug register numbers into register names.
+
+#include <stddef.h>
+#include <dwarf-regs.h>
+
+#if defined(__CSKYABIV2__)
+#define CSKY_MAX_REGS 73
+const char *csky_dwarf_regs_table[CSKY_MAX_REGS] = {
+ /* r0 ~ r8 */
+ "%a0", "%a1", "%a2", "%a3", "%regs0", "%regs1", "%regs2", "%regs3",
+ /* r9 ~ r15 */
+ "%regs4", "%regs5", "%regs6", "%regs7", "%regs8", "%regs9", "%sp",
+ "%lr",
+ /* r16 ~ r23 */
+ "%exregs0", "%exregs1", "%exregs2", "%exregs3", "%exregs4",
+ "%exregs5", "%exregs6", "%exregs7",
+ /* r24 ~ r31 */
+ "%exregs8", "%exregs9", "%exregs10", "%exregs11", "%exregs12",
+ "%exregs13", "%exregs14", "%tls",
+ "%pc", NULL, NULL, NULL, "%hi", "%lo", NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "%epc",
+};
+#else
+#define CSKY_MAX_REGS 57
+const char *csky_dwarf_regs_table[CSKY_MAX_REGS] = {
+ /* r0 ~ r8 */
+ "%sp", "%regs9", "%a0", "%a1", "%a2", "%a3", "%regs0", "%regs1",
+ /* r9 ~ r15 */
+ "%regs2", "%regs3", "%regs4", "%regs5", "%regs6", "%regs7", "%regs8",
+ "%lr",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "%epc",
+};
+#endif
+
+const char *get_arch_regstr(unsigned int n)
+{
+ return (n < CSKY_MAX_REGS) ? csky_dwarf_regs_table[n] : NULL;
+}
diff --git a/tools/perf/arch/csky/util/unwind-libdw.c b/tools/perf/arch/csky/util/unwind-libdw.c
new file mode 100644
index 000000000000..4bb4a06776e4
--- /dev/null
+++ b/tools/perf/arch/csky/util/unwind-libdw.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+#include "../../util/event.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct regs_dump *user_regs = &ui->sample->user_regs;
+ Dwarf_Word dwarf_regs[PERF_REG_CSKY_MAX];
+
+#define REG(r) ({ \
+ Dwarf_Word val = 0; \
+ perf_reg_value(&val, user_regs, PERF_REG_CSKY_##r); \
+ val; \
+})
+
+#if defined(__CSKYABIV2__)
+ dwarf_regs[0] = REG(A0);
+ dwarf_regs[1] = REG(A1);
+ dwarf_regs[2] = REG(A2);
+ dwarf_regs[3] = REG(A3);
+ dwarf_regs[4] = REG(REGS0);
+ dwarf_regs[5] = REG(REGS1);
+ dwarf_regs[6] = REG(REGS2);
+ dwarf_regs[7] = REG(REGS3);
+ dwarf_regs[8] = REG(REGS4);
+ dwarf_regs[9] = REG(REGS5);
+ dwarf_regs[10] = REG(REGS6);
+ dwarf_regs[11] = REG(REGS7);
+ dwarf_regs[12] = REG(REGS8);
+ dwarf_regs[13] = REG(REGS9);
+ dwarf_regs[14] = REG(SP);
+ dwarf_regs[15] = REG(LR);
+ dwarf_regs[16] = REG(EXREGS0);
+ dwarf_regs[17] = REG(EXREGS1);
+ dwarf_regs[18] = REG(EXREGS2);
+ dwarf_regs[19] = REG(EXREGS3);
+ dwarf_regs[20] = REG(EXREGS4);
+ dwarf_regs[21] = REG(EXREGS5);
+ dwarf_regs[22] = REG(EXREGS6);
+ dwarf_regs[23] = REG(EXREGS7);
+ dwarf_regs[24] = REG(EXREGS8);
+ dwarf_regs[25] = REG(EXREGS9);
+ dwarf_regs[26] = REG(EXREGS10);
+ dwarf_regs[27] = REG(EXREGS11);
+ dwarf_regs[28] = REG(EXREGS12);
+ dwarf_regs[29] = REG(EXREGS13);
+ dwarf_regs[30] = REG(EXREGS14);
+ dwarf_regs[31] = REG(TLS);
+ dwarf_regs[32] = REG(PC);
+#else
+ dwarf_regs[0] = REG(SP);
+ dwarf_regs[1] = REG(REGS9);
+ dwarf_regs[2] = REG(A0);
+ dwarf_regs[3] = REG(A1);
+ dwarf_regs[4] = REG(A2);
+ dwarf_regs[5] = REG(A3);
+ dwarf_regs[6] = REG(REGS0);
+ dwarf_regs[7] = REG(REGS1);
+ dwarf_regs[8] = REG(REGS2);
+ dwarf_regs[9] = REG(REGS3);
+ dwarf_regs[10] = REG(REGS4);
+ dwarf_regs[11] = REG(REGS5);
+ dwarf_regs[12] = REG(REGS6);
+ dwarf_regs[13] = REG(REGS7);
+ dwarf_regs[14] = REG(REGS8);
+ dwarf_regs[15] = REG(LR);
+#endif
+ dwfl_thread_state_register_pc(thread, REG(PC));
+
+ return dwfl_thread_state_registers(thread, 0, PERF_REG_CSKY_MAX,
+ dwarf_regs);
+}
diff --git a/tools/perf/arch/nds32/Build b/tools/perf/arch/nds32/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/nds32/Build
+++ b/tools/perf/arch/nds32/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/nds32/util/Build b/tools/perf/arch/nds32/util/Build
index ca623bbf993c..d0bc205fe49a 100644
--- a/tools/perf/arch/nds32/util/Build
+++ b/tools/perf/arch/nds32/util/Build
@@ -1 +1 @@
-libperf-y += header.o
+perf-y += header.o
diff --git a/tools/perf/arch/powerpc/Build b/tools/perf/arch/powerpc/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/powerpc/Build
+++ b/tools/perf/arch/powerpc/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/powerpc/tests/Build b/tools/perf/arch/powerpc/tests/Build
index d827ef384b33..3526ab0af9f9 100644
--- a/tools/perf/arch/powerpc/tests/Build
+++ b/tools/perf/arch/powerpc/tests/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
+perf-y += arch-tests.o
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 5f39efef0856..5c178e4a1995 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index ba98bd006488..7cf0b8803097 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,11 +1,11 @@
-libperf-y += header.o
-libperf-y += sym-handling.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
-libperf-y += mem-events.o
+perf-y += header.o
+perf-y += sym-handling.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += mem-events.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += skip-callchain-idx.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c
index 98ac87052a74..4952890b9428 100644
--- a/tools/perf/arch/powerpc/util/dwarf-regs.c
+++ b/tools/perf/arch/powerpc/util/dwarf-regs.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Ian Munsie, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <stddef.h>
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 596ad6aedaac..f9db341c47b6 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -3,6 +3,8 @@
#include "util/kvm-stat.h"
#include "util/parse-events.h"
#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
#include "book3s_hv_exits.h"
#include "book3s_hcalls.h"
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 7c6eeb4633fe..fc9c2f5fcd52 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Use DWARF Debug information to skip unnecessary callchain entries.
*
* Copyright (C) 2014 Sukadev Bhattiprolu, IBM Corporation.
* Copyright (C) 2014 Ulrich Weigand, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <inttypes.h>
#include <dwarf.h>
@@ -16,6 +12,9 @@
#include "util/thread.h"
#include "util/callchain.h"
#include "util/debug.h"
+#include "util/dso.h"
+#include "util/map.h"
+#include "util/symbol.h"
/*
* When saving the callchain on Power, the kernel conservatively saves
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 10a44e946f77..b0a67eaf2ce8 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright (C) 2015 Naveen N. Rao, IBM Corporation
*/
diff --git a/tools/perf/arch/powerpc/util/unwind-libunwind.c b/tools/perf/arch/powerpc/util/unwind-libunwind.c
index 9e15f92ae49f..90a6beda20de 100644
--- a/tools/perf/arch/powerpc/util/unwind-libunwind.c
+++ b/tools/perf/arch/powerpc/util/unwind-libunwind.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016 Chandan Kumar, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <errno.h>
diff --git a/tools/perf/arch/s390/Build b/tools/perf/arch/s390/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/s390/Build
+++ b/tools/perf/arch/s390/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index dfa6e3103437..cb198787570a 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index de0dd66dbb48..89bb8f2c54ce 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -46,7 +46,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
static struct ins_ops s390_call_ops = {
.parse = s390_call__parse,
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 4a233683c684..22797f043b84 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,9 +1,9 @@
-libperf-y += header.o
-libperf-y += kvm-stat.o
+perf-y += header.o
+perf-y += kvm-stat.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-y += machine.o
+perf-y += machine.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
index 163b92f33998..3db85cd2069e 100644
--- a/tools/perf/arch/s390/util/header.c
+++ b/tools/perf/arch/s390/util/header.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Implementation of get_cpuid().
*
* Copyright IBM Corp. 2014, 2018
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
* Thomas Richter <tmricht@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <sys/types.h>
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index aaabab5e2830..f852f2a77e0a 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Arch specific functions for perf kvm stat.
*
* Copyright 2014 IBM Corp.
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/sie.h>
define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index 0b2054007314..a19690a17291 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -5,16 +5,19 @@
#include "util.h"
#include "machine.h"
#include "api/fs/fs.h"
+#include "debug.h"
int arch__fix_module_text_start(u64 *start, const char *name)
{
+ u64 m_start = *start;
char path[PATH_MAX];
snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
(int)strlen(name) - 2, name + 1);
-
- if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
- return -1;
+ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+ pr_debug2("Using module %s start:%#lx\n", path, m_start);
+ *start = m_start;
+ }
return 0;
}
diff --git a/tools/perf/arch/sh/Build b/tools/perf/arch/sh/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sh/Build
+++ b/tools/perf/arch/sh/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sh/Makefile b/tools/perf/arch/sh/Makefile
index 7fbca175099e..88c08eed9c7b 100644
--- a/tools/perf/arch/sh/Makefile
+++ b/tools/perf/arch/sh/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
diff --git a/tools/perf/arch/sh/util/Build b/tools/perf/arch/sh/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sh/util/Build
+++ b/tools/perf/arch/sh/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c
index f8dfa89696f4..4b17fc86c73b 100644
--- a/tools/perf/arch/sh/util/dwarf-regs.c
+++ b/tools/perf/arch/sh/util/dwarf-regs.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 Matt Fleming <matt@console-pimps.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <stddef.h>
diff --git a/tools/perf/arch/sparc/Build b/tools/perf/arch/sparc/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/sparc/Build
+++ b/tools/perf/arch/sparc/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/sparc/Makefile b/tools/perf/arch/sparc/Makefile
index 275dea7ff59a..4031db72ba71 100644
--- a/tools/perf/arch/sparc/Makefile
+++ b/tools/perf/arch/sparc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
diff --git a/tools/perf/arch/sparc/util/Build b/tools/perf/arch/sparc/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/sparc/util/Build
+++ b/tools/perf/arch/sparc/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
index b704fdb9237a..1282cb2dc7bd 100644
--- a/tools/perf/arch/sparc/util/dwarf-regs.c
+++ b/tools/perf/arch/sparc/util/dwarf-regs.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (C) 2010 David S. Miller <davem@davemloft.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <stddef.h>
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index db52fa22d3a1..a7dd46a5b678 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,2 @@
-libperf-y += util/
-libperf-y += tests/
+perf-y += util/
+perf-y += tests/
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index f0b1709a5ffb..b4e6f9e6204a 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,6 +343,18 @@
332 common statx __x64_sys_statx
333 common io_pgetevents __x64_sys_io_pgetevents
334 common rseq __x64_sys_rseq
+# don't use numbers 387 through 423, add new calls after the last
+# 'common' entry
+424 common pidfd_send_signal __x64_sys_pidfd_send_signal
+425 common io_uring_setup __x64_sys_io_uring_setup
+426 common io_uring_enter __x64_sys_io_uring_enter
+427 common io_uring_register __x64_sys_io_uring_register
+428 common open_tree __x64_sys_open_tree
+429 common move_mount __x64_sys_move_mount
+430 common fsopen __x64_sys_fsopen
+431 common fsconfig __x64_sys_fsconfig
+432 common fsmount __x64_sys_fsmount
+433 common fspick __x64_sys_fspick
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -361,7 +373,7 @@
520 x32 execve __x32_compat_sys_execve/ptregs
521 x32 ptrace __x32_compat_sys_ptrace
522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait
+523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
525 x32 sigaltstack __x32_compat_sys_sigaltstack
526 x32 timer_create __x32_compat_sys_timer_create
@@ -375,7 +387,7 @@
534 x32 preadv __x32_compat_sys_preadv64
535 x32 pwritev __x32_compat_sys_pwritev64
536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg __x32_compat_sys_recvmmsg
+537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
538 x32 sendmmsg __x32_compat_sys_sendmmsg
539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index 7f6d538f8a89..b7321337d100 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -8,9 +8,9 @@
void perf_regs_load(u64 *regs);
+#define PERF_REGS_MAX PERF_REG_X86_XMM_MAX
#ifndef HAVE_ARCH_X86_64_SUPPORT
#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
-#define PERF_REGS_MAX PERF_REG_X86_32_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#else
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
@@ -18,7 +18,6 @@ void perf_regs_load(u64 *regs);
(1ULL << PERF_REG_X86_FS) | \
(1ULL << PERF_REG_X86_GS))
#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
-#define PERF_REGS_MAX PERF_REG_X86_64_MAX
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#endif
#define PERF_REG_IP PERF_REG_X86_IP
@@ -77,6 +76,28 @@ static inline const char *perf_reg_name(int id)
case PERF_REG_X86_R15:
return "R15";
#endif /* HAVE_ARCH_X86_64_SUPPORT */
+
+#define XMM(x) \
+ case PERF_REG_X86_XMM ## x: \
+ case PERF_REG_X86_XMM ## x + 1: \
+ return "XMM" #x;
+ XMM(0)
+ XMM(1)
+ XMM(2)
+ XMM(3)
+ XMM(4)
+ XMM(5)
+ XMM(6)
+ XMM(7)
+ XMM(8)
+ XMM(9)
+ XMM(10)
+ XMM(11)
+ XMM(12)
+ XMM(13)
+ XMM(14)
+ XMM(15)
+#undef XMM
default:
return NULL;
}
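Each XMM register is 128 bits wide and therefore occupies two consecutive 64-bit slots in the perf sample register space, which is why the XMM(x) helper emits two case labels per register. Expanded for XMM0, the macro above produces:

        case PERF_REG_X86_XMM0:
        case PERF_REG_X86_XMM0 + 1:     /* upper 64 bits of the 128-bit register */
                return "XMM0";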
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 586849ff83a0..3d83d0c6982d 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -1,8 +1,8 @@
-libperf-$(CONFIG_DWARF_UNWIND) += regs_load.o
-libperf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
-libperf-y += arch-tests.o
-libperf-y += rdpmc.o
-libperf-y += perf-time-to-tsc.o
-libperf-$(CONFIG_AUXTRACE) += insn-x86.o
-libperf-$(CONFIG_X86_64) += bp-modify.o
+perf-y += arch-tests.o
+perf-y += rdpmc.o
+perf-y += perf-time-to-tsc.o
+perf-$(CONFIG_AUXTRACE) += insn-x86.o
+perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 7879df34569a..6ad0a1cedb13 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,6 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
+#include "map_groups.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
index a21454835cd4..1a29f6379bde 100644
--- a/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.awk
@@ -1,15 +1,8 @@
#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0-only
# gen-insn-x86-dat.awk: script to convert data for the insn-x86 test
# Copyright (c) 2015, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
BEGIN {
print "/*"
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
index 2d4ef94cff98..0d0a003a9c5e 100755
--- a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
@@ -1,15 +1,8 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# gen-insn-x86-dat: generate data for the insn-x86 test
# Copyright (c) 2015, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
set -e
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 844b8f335532..47f9c56e744f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,18 +1,19 @@
-libperf-y += header.o
-libperf-y += tsc.o
-libperf-y += pmu.o
-libperf-y += kvm-stat.o
-libperf-y += perf_regs.o
-libperf-y += group.o
-libperf-y += machine.o
-libperf-y += event.o
+perf-y += header.o
+perf-y += tsc.o
+perf-y += pmu.o
+perf-y += kvm-stat.o
+perf-y += perf_regs.o
+perf-y += group.o
+perf-y += machine.o
+perf-y += event.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += archinsn.o
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644
index 000000000000..4237bb2e7fa2
--- /dev/null
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "perf.h"
+#include "archinsn.h"
+#include "util/intel-pt-decoder/insn.h"
+#include "machine.h"
+#include "thread.h"
+#include "symbol.h"
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine)
+{
+ struct insn insn;
+ int len;
+ bool is64bit = false;
+
+ if (!sample->ip)
+ return;
+ len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
+ if (len <= 0)
+ return;
+ insn_init(&insn, sample->insn, len, is64bit);
+ insn_get_length(&insn);
+ if (insn_complete(&insn) && insn.length <= len)
+ sample->insn_len = insn.length;
+}
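arch_fetch_insn() copies up to sizeof(sample->insn) bytes from the sampled IP with thread__memcpy() and runs the x86 instruction decoder over them, recording a length only when the decode completed and fits inside the copied bytes. A minimal sketch of the same decode-and-validate step on a local buffer, assuming the same insn.h decoder that this file includes:

        #include <stdio.h>
        #include "util/intel-pt-decoder/insn.h"

        int main(void)
        {
                unsigned char buf[MAX_INSN_SIZE] = { 0x90 };    /* single-byte NOP */
                struct insn insn;

                insn_init(&insn, buf, sizeof(buf), /* x86_64 */ 1);
                insn_get_length(&insn);
                if (insn_complete(&insn) && insn.length <= (int)sizeof(buf))
                        printf("decoded length: %d\n", insn.length);    /* prints 1 */
                return 0;
        }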
diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c
index b135af62011c..d711268af330 100644
--- a/tools/perf/arch/x86/util/auxtrace.c
+++ b/tools/perf/arch/x86/util/auxtrace.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* auxtrace.c: AUX area tracing support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <errno.h>
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 1f86ee8fb831..530934805710 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
* Extracted from probe-finder.c
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <stddef.h>
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 781df40b2966..e6d4d9591c79 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel-bts.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <errno.h>
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index ba8ecaf52200..1869f62a10cd 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <errno.h>
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 081353d7b095..865a9762f22e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include "../../util/kvm-stat.h"
+#include "../../util/evsel.h"
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index fead6b3b4206..3666c0076df9 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -31,6 +31,22 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(R14, PERF_REG_X86_R14),
SMPL_REG(R15, PERF_REG_X86_R15),
#endif
+ SMPL_REG2(XMM0, PERF_REG_X86_XMM0),
+ SMPL_REG2(XMM1, PERF_REG_X86_XMM1),
+ SMPL_REG2(XMM2, PERF_REG_X86_XMM2),
+ SMPL_REG2(XMM3, PERF_REG_X86_XMM3),
+ SMPL_REG2(XMM4, PERF_REG_X86_XMM4),
+ SMPL_REG2(XMM5, PERF_REG_X86_XMM5),
+ SMPL_REG2(XMM6, PERF_REG_X86_XMM6),
+ SMPL_REG2(XMM7, PERF_REG_X86_XMM7),
+ SMPL_REG2(XMM8, PERF_REG_X86_XMM8),
+ SMPL_REG2(XMM9, PERF_REG_X86_XMM9),
+ SMPL_REG2(XMM10, PERF_REG_X86_XMM10),
+ SMPL_REG2(XMM11, PERF_REG_X86_XMM11),
+ SMPL_REG2(XMM12, PERF_REG_X86_XMM12),
+ SMPL_REG2(XMM13, PERF_REG_X86_XMM13),
+ SMPL_REG2(XMM14, PERF_REG_X86_XMM14),
+ SMPL_REG2(XMM15, PERF_REG_X86_XMM15),
SMPL_REG_END
};
@@ -254,3 +270,31 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
+
+uint64_t arch__intr_reg_mask(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_REGS_INTR,
+ .sample_regs_intr = PERF_REG_EXTENDED_MASK,
+ .precise_ip = 1,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
+ int fd;
+ /*
+	 * sample_period is in an unnamed union, so init it here to build on older gcc versions
+ */
+ attr.sample_period = 1;
+
+ event_attr_init(&attr);
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (fd != -1) {
+ close(fd);
+ return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
+ }
+
+ return PERF_REGS_MASK;
+}
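arch__intr_reg_mask() probes for XMM support by opening a throwaway hardware-cycles event that requests PERF_REG_EXTENDED_MASK in sample_regs_intr: if the kernel accepts the event, the extended mask is advertised, otherwise only PERF_REGS_MASK is. A hedged sketch of how a caller might apply the probed mask when configuring interrupt register sampling (the attr fields are the standard perf_event ABI; the surrounding setup is illustrative, not from this patch):

        struct perf_event_attr attr = { 0 };

        attr.sample_type     |= PERF_SAMPLE_REGS_INTR;
        attr.sample_regs_intr = arch__intr_reg_mask(); /* includes XMM0-15 only when supported */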
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
index 54afe4a467e7..e4e5f33c84d8 100644
--- a/tools/perf/arch/xtensa/Build
+++ b/tools/perf/arch/xtensa/Build
@@ -1 +1 @@
-libperf-y += util/
+perf-y += util/
diff --git a/tools/perf/arch/xtensa/Makefile b/tools/perf/arch/xtensa/Makefile
index 7fbca175099e..88c08eed9c7b 100644
--- a/tools/perf/arch/xtensa/Makefile
+++ b/tools/perf/arch/xtensa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
diff --git a/tools/perf/arch/xtensa/util/Build b/tools/perf/arch/xtensa/util/Build
index 954e287bbb89..e813e618954b 100644
--- a/tools/perf/arch/xtensa/util/Build
+++ b/tools/perf/arch/xtensa/util/Build
@@ -1 +1 @@
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/xtensa/util/dwarf-regs.c b/tools/perf/arch/xtensa/util/dwarf-regs.c
index 4dba76bfb4ce..12f5457300f5 100644
--- a/tools/perf/arch/xtensa/util/dwarf-regs.c
+++ b/tools/perf/arch/xtensa/util/dwarf-regs.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Mapping of DWARF debug register numbers into register names.
*
* Copyright (c) 2015 Cadence Design Systems Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <stddef.h>
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 0c0a6e824934..2af067859966 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret;
+ int ret = 0;
if (!noaffinity)
pthread_attr_init(&thread_attr);
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 5a11534e96a0..fe85448abd45 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
pthread_attr_t thread_attr, *attrp = NULL;
cpu_set_t cpuset;
unsigned int i, j;
- int ret, events = EPOLLIN;
+ int ret = 0, events = EPOLLIN;
if (oneshot)
events |= EPOLLONESHOT;
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 44195514b19e..a7784554a80d 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,10 +34,15 @@
#include <sys/types.h>
#include <linux/kernel.h>
#include <linux/time64.h>
+#include <linux/numa.h>
#include <numa.h>
#include <numaif.h>
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
/*
 * Regular printout to the terminal, suppressed if -q is specified:
 * Regular printout to the terminal, suppressed if -q is specified:
*/
@@ -298,7 +303,7 @@ static cpu_set_t bind_to_node(int target_node)
CPU_ZERO(&mask);
- if (target_node == -1) {
+ if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
CPU_SET(cpu, &mask);
} else {
@@ -339,7 +344,7 @@ static void bind_to_memnode(int node)
unsigned long nodemask;
int ret;
- if (node == -1)
+ if (node == NUMA_NO_NODE)
return;
BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
@@ -1363,7 +1368,7 @@ static void init_thread_data(void)
int cpu;
/* Allow all nodes by default: */
- td->bind_node = -1;
+ td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
CPU_ZERO(&td->bind_cpumask);
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 93d679eaf1f4..77deb3a40596 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -27,6 +27,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
@@ -158,8 +159,6 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct perf_evsel *evsel = iter->evsel;
int err;
- hist__account_cycles(sample->branch_stack, al, sample, false);
-
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
@@ -198,6 +197,8 @@ static int process_branch_callback(struct perf_evsel *evsel,
if (a.map != NULL)
a.map->dso->hit = 1;
+ hist__account_cycles(sample->branch_stack, al, sample, false);
+
ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
return ret;
}
@@ -227,7 +228,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
- rb_erase(&al->sym->rb_node,
+ rb_erase_cached(&al->sym->rb_node,
&al->map->dso->symbols);
symbol__delete(al->sym);
dso__reset_find_symbol_cache(al->map->dso);
@@ -305,7 +306,7 @@ static void hists__find_annotations(struct hists *hists,
struct perf_evsel *evsel,
struct perf_annotate *ann)
{
- struct rb_node *nd = rb_first(&hists->entries), *next;
+ struct rb_node *nd = rb_first_cached(&hists->entries), *next;
int key = K_RIGHT;
while (nd) {
@@ -440,7 +441,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->data->file.path);
+ ui__error("The %s data has no samples!\n", session->data->path);
goto out;
}
@@ -577,7 +578,7 @@ int cmd_annotate(int argc, const char **argv)
if (quiet)
perf_quiet_option();
- data.file.path = input_name;
+ data.path = input_name;
annotate.session = perf_session__new(&data, false, &annotate.tool);
if (annotate.session == NULL)
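This hunk (and the builtin-c2c.c and builtin-diff.c hunks below) converts hists traversal from rb_first()/rb_erase() on a struct rb_root to the *_cached variants on a struct rb_root_cached, which caches the leftmost node so rb_first_cached() is O(1) instead of a walk down the tree. A self-contained sketch of that API, assuming the rbtree implementation from tools/include/linux/rbtree.h is on the include path:

        #include <stdio.h>
        #include <stdbool.h>
        #include <linux/rbtree.h>

        struct item {
                struct rb_node node;
                int key;
        };

        static void item_insert(struct rb_root_cached *root, struct item *it)
        {
                struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
                bool leftmost = true;

                while (*p) {
                        struct item *cur = rb_entry(*p, struct item, node);

                        parent = *p;
                        if (it->key < cur->key) {
                                p = &(*p)->rb_left;
                        } else {
                                p = &(*p)->rb_right;
                                leftmost = false;
                        }
                }
                rb_link_node(&it->node, parent, p);
                rb_insert_color_cached(&it->node, root, leftmost);
        }

        int main(void)
        {
                struct rb_root_cached root = RB_ROOT_CACHED;
                struct item items[] = { { .key = 3 }, { .key = 1 }, { .key = 2 } };
                struct rb_node *nd;

                for (int i = 0; i < 3; i++)
                        item_insert(&root, &items[i]);

                /* O(1): returns the cached leftmost node */
                for (nd = rb_first_cached(&root); nd; nd = rb_next(nd))
                        printf("%d\n", rb_entry(nd, struct item, node)->key);

                rb_erase_cached(&items[1].node, &root); /* drops key 1 and updates the cache */
                return 0;
        }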
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 115110a4796a..10457b10e568 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -416,8 +416,8 @@ int cmd_buildid_cache(int argc, const char **argv)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
- data.file.path = missing_filename;
- data.force = force;
+ data.path = missing_filename;
+ data.force = force;
session = perf_session__new(&data, false, NULL);
if (session == NULL)
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 78abbe8d9d5f..f403e19488b5 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -52,11 +52,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
symbol__elf_init();
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index d340d2e42776..9e6cc868bdb4 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -33,6 +33,7 @@
#include "ui/browsers/hists.h"
#include "thread.h"
#include "mem2node.h"
+#include "symbol.h"
struct c2c_hists {
struct hists hists;
@@ -1969,7 +1970,7 @@ static void calc_width(struct c2c_hist_entry *c2c_he)
set_nodestr(c2c_he);
}
-static int filter_cb(struct hist_entry *he)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
@@ -1986,7 +1987,7 @@ static int filter_cb(struct hist_entry *he)
return 0;
}
-static int resort_cl_cb(struct hist_entry *he)
+static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
@@ -2055,6 +2056,12 @@ static int setup_nodes(struct perf_session *session)
if (!set)
return -ENOMEM;
+ nodes[node] = set;
+
+ /* empty node, skip */
+ if (cpu_map__empty(map))
+ continue;
+
for (cpu = 0; cpu < map->nr; cpu++) {
set_bit(map->map[cpu], set);
@@ -2063,8 +2070,6 @@ static int setup_nodes(struct perf_session *session)
cpu2node[map->map[cpu]] = node;
}
-
- nodes[node] = set;
}
setup_nodes_header();
@@ -2073,7 +2078,7 @@ static int setup_nodes(struct perf_session *session)
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
-static int resort_hitm_cb(struct hist_entry *he)
+static int resort_hitm_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
@@ -2088,14 +2093,14 @@ static int resort_hitm_cb(struct hist_entry *he)
static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
int ret = 0;
while (next) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node);
- ret = cb(he);
+ ret = cb(he, NULL);
if (ret)
break;
next = rb_next(&he->rb_node);
@@ -2215,7 +2220,7 @@ static void print_pareto(FILE *out)
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
- nd = rb_first(&c2c.hists.hists.entries);
+ nd = rb_first_cached(&c2c.hists.hists.entries);
for (; nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2283,7 +2288,7 @@ static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
static void c2c_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
while (nd) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
@@ -2343,7 +2348,7 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct c2c_cacheline_browser *cl_browser;
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" ENTER Toggle callchains (if present) \n"
" n Toggle Node details info \n"
" s Toggle full length of symbol and source line columns \n"
@@ -2424,7 +2429,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
{
struct hist_browser *browser;
int key = -1;
- const char help[] =
+ static const char help[] =
" d Display cacheline details \n"
" ENTER Toggle callchains (if present) \n"
" q Quit \n";
@@ -2749,8 +2754,8 @@ static int perf_c2c__report(int argc, const char **argv)
if (!input_name || !strlen(input_name))
input_name = "perf.data";
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
err = setup_display(display);
if (err)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 39db2ee32d48..6e7920793729 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -19,12 +19,21 @@
#include "util/util.h"
#include "util/data.h"
#include "util/config.h"
+#include "util/time-utils.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <math.h>
+struct perf_diff {
+ struct perf_tool tool;
+ const char *time_str;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
+};
+
/* Diff command specific HPP columns. */
enum {
PERF_HPP_DIFF__BASELINE,
@@ -74,6 +83,9 @@ static unsigned int sort_compute = 1;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
+static const char *cpu_list;
+static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+
enum {
COMPUTE_DELTA,
COMPUTE_RATIO,
@@ -323,22 +335,33 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
return -1;
}
-static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
+static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
+ if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
+ sample->time)) {
+ return 0;
+ }
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
+ if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) {
+ ret = 0;
+ goto out_put;
+ }
+
if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, skipping event\n");
goto out_put;
@@ -359,17 +382,19 @@ out_put:
return ret;
}
-static struct perf_tool tool = {
- .sample = diff__process_sample_event,
- .mmap = perf_event__process_mmap,
- .mmap2 = perf_event__process_mmap2,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_exit,
- .fork = perf_event__process_fork,
- .lost = perf_event__process_lost,
- .namespaces = perf_event__process_namespaces,
- .ordered_events = true,
- .ordering_requires_timestamps = true,
+static struct perf_diff pdiff = {
+ .tool = {
+ .sample = diff__process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .namespaces = perf_event__process_namespaces,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
};
static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
@@ -429,7 +454,7 @@ get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
static void hists__baseline_only(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -437,13 +462,13 @@ static void hists__baseline_only(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
- rb_erase(&he->rb_node_in, root);
+ rb_erase_cached(&he->rb_node_in, root);
hist_entry__delete(he);
}
}
@@ -451,7 +476,7 @@ static void hists__baseline_only(struct hists *hists)
static void hists__precompute(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
if (hists__has(hists, need_collapse))
@@ -459,7 +484,7 @@ static void hists__precompute(struct hists *hists)
else
root = hists->entries_in;
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;
@@ -708,7 +733,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->data.file.path,
+ d->idx, d->data.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -771,26 +796,127 @@ static void data__free(struct data__file *d)
}
}
+static int abstime_str_dup(char **pstr)
+{
+ char *str = NULL;
+
+ if (pdiff.time_str && strchr(pdiff.time_str, ':')) {
+ str = strdup(pdiff.time_str);
+ if (!str)
+ return -ENOMEM;
+ }
+
+ *pstr = str;
+ return 0;
+}
+
+static int parse_absolute_time(struct data__file *d, char **pstr)
+{
+ char *p = *pstr;
+ int ret;
+
+ /*
+ * Absolute timestamp for one file has the format: a.b,c.d
+ * For multiple files, the format is: a.b,c.d:a.b,c.d
+ */
+ p = strchr(*pstr, ':');
+ if (p) {
+ if (p == *pstr) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
+ *p = 0;
+ p++;
+ if (*p == 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = perf_time__parse_for_ranges(*pstr, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ if (ret < 0)
+ return ret;
+
+ if (!p || *p == 0)
+ *pstr = NULL;
+ else
+ *pstr = p;
+
+ return ret;
+}
+
+static int parse_percent_time(struct data__file *d)
+{
+ int ret;
+
+ ret = perf_time__parse_for_ranges(pdiff.time_str, d->session,
+ &pdiff.ptime_range,
+ &pdiff.range_size,
+ &pdiff.range_num);
+ return ret;
+}
+
+static int parse_time_str(struct data__file *d, char *abstime_ostr,
+ char **pabstime_tmp)
+{
+ int ret = 0;
+
+ if (abstime_ostr)
+ ret = parse_absolute_time(d, pabstime_tmp);
+ else if (pdiff.time_str)
+ ret = parse_percent_time(d);
+
+ return ret;
+}
+
static int __cmd_diff(void)
{
struct data__file *d;
- int ret = -EINVAL, i;
+ int ret, i;
+ char *abstime_ostr, *abstime_tmp;
+
+ ret = abstime_str_dup(&abstime_ostr);
+ if (ret)
+ return ret;
+
+ abstime_tmp = abstime_ostr;
+ ret = -EINVAL;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &tool);
+ d->session = perf_session__new(&d->data, false, &pdiff.tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->data.file.path);
+ pr_err("Failed to open %s\n", d->data.path);
ret = -1;
goto out_delete;
}
+ if (pdiff.time_str) {
+ ret = parse_time_str(d, abstime_ostr, &abstime_tmp);
+ if (ret < 0)
+ goto out_delete;
+ }
+
+ if (cpu_list) {
+ ret = perf_session__cpu_bitmap(d->session, cpu_list,
+ cpu_bitmap);
+ if (ret < 0)
+ goto out_delete;
+ }
+
ret = perf_session__process_events(d->session);
if (ret) {
- pr_err("Failed to process %s\n", d->data.file.path);
+ pr_err("Failed to process %s\n", d->data.path);
goto out_delete;
}
perf_evlist__collapse_resort(d->session->evlist);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
}
data_process();
@@ -802,6 +928,13 @@ static int __cmd_diff(void)
}
free(data__files);
+
+ if (pdiff.ptime_range)
+ zfree(&pdiff.ptime_range);
+
+ if (abstime_ostr)
+ free(abstime_ostr);
+
return ret;
}
@@ -849,6 +982,13 @@ static const struct option options[] = {
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
+ OPT_STRING(0, "time", &pdiff.time_str, "str",
+ "Time span (time percent or absolute timestamp)"),
+ OPT_STRING(0, "cpu", &cpu_list, "cpu", "list of cpus to profile"),
+ OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "only consider symbols in these pids"),
+ OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "only consider symbols in these tids"),
OPT_END()
};
@@ -1289,9 +1429,9 @@ static int data_init(int argc, const char **argv)
data__for_each_file(i, d) {
struct perf_data *data = &d->data;
- data->file.path = use_default ? defaults[i] : argv[i];
- data->mode = PERF_DATA_MODE_READ,
- data->force = force,
+ data->path = use_default ? defaults[i] : argv[i];
+ data->mode = PERF_DATA_MODE_READ,
+ data->force = force,
d->idx = i;
}
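The builtin-diff.c changes add --time, --cpu, --pid and --tid filtering to perf diff, applied in diff__process_sample_event() before a sample is added to the hists. Per the parse_absolute_time() comment, an absolute --time span is written a.b,c.d for one file and per-file spans are joined with ':' for multiple files, consumed in order; as a purely hypothetical invocation, something like perf diff --time 1234.5,1240.0:2345.6,2350.0 perf.data.old perf.data would apply the first range to the first (baseline) file and the second range to the other. A percent-style time string, by contrast, is parsed once per session via parse_percent_time().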
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index e06e822ce634..6e4f63b0da4a 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -23,9 +23,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
struct perf_session *session;
struct perf_evsel *pos;
struct perf_data data = {
- .file = {
- .path = file_name,
- },
+ .path = file_name,
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index f42f228e8899..9c228c55e1fb 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-ftrace.c
*
* Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
- *
- * Released under the GPL v2.
*/
#include "builtin.h"
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index eda41673c4f3..8e0e06d3edfc 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -12,6 +12,7 @@
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
@@ -19,6 +20,7 @@
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
+#include "util/symbol.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
@@ -768,10 +770,8 @@ int cmd_inject(int argc, const char **argv)
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
- .file = {
- .path = "-",
- },
- .mode = PERF_DATA_MODE_WRITE,
+ .path = "-",
+ .mode = PERF_DATA_MODE_WRITE,
},
};
struct perf_data data = {
@@ -784,7 +784,7 @@ int cmd_inject(int argc, const char **argv)
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &inject.output.file.path, "file",
+ OPT_STRING('o', "output", &inject.output.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -832,11 +832,14 @@ int cmd_inject(int argc, const char **argv)
inject.tool.ordered_events = inject.sched_stat;
- data.file.path = inject.input_name;
+ data.path = inject.input_name;
inject.session = perf_session__new(&data, true, &inject.tool);
if (inject.session == NULL)
return -1;
+ if (zstd_init(&(inject.session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed.\n");
+
if (inject.build_ids) {
/*
* to make sure the mmap records are ordered correctly
@@ -867,6 +870,7 @@ int cmd_inject(int argc, const char **argv)
ret = __cmd_inject(&inject);
out_delete:
+ zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
return ret;
}
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index 90d1a2305b72..c1a44671b0b5 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-kallsyms.c
*
* Builtin command: Look for a symbol in the running kernel and its modules
*
* Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <inttypes.h>
#include "builtin.h"
@@ -13,6 +12,7 @@
#include <subcmd/parse-options.h>
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
static int __cmd_kallsyms(int argc, const char **argv)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index b63bca4b0c2a..b80eee455111 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,6 +6,7 @@
#include "util/evsel.h"
#include "util/util.h"
#include "util/config.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -334,7 +335,7 @@ static int build_alloc_func_list(void)
struct alloc_func *func;
struct machine *machine = &kmem_session->machines.host;
regex_t alloc_func_regex;
- const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
+ static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
if (ret) {
@@ -1924,7 +1925,7 @@ int cmd_kmem(int argc, const char **argv)
NULL
};
struct perf_session *session;
- const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
+ static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
int ret = perf_config(kmem_config, NULL);
if (ret)
@@ -1948,7 +1949,7 @@ int cmd_kmem(int argc, const char **argv)
return __cmd_record(argc, argv);
}
- data.file.path = input_name;
+ data.path = input_name;
kmem_session = session = perf_session__new(&data, false, &perf_kmem);
if (session == NULL)
@@ -1974,7 +1975,7 @@ int cmd_kmem(int argc, const char **argv)
goto out_delete;
}
- kmem_page_size = tep_get_page_size(evsel->tp_format->pevent);
+ kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
symbol_conf.use_callchain = true;
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 3d4cbc4e87c7..dbb6f737a3e2 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1080,11 +1080,9 @@ static int read_events(struct perf_kvm_stat *kvm)
.ordered_events = true,
};
struct perf_data file = {
- .file = {
- .path = kvm->file_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = kvm->force,
+ .path = kvm->file_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index ead221e49f00..e0312a1c4792 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -70,10 +70,11 @@ int cmd_list(int argc, const char **argv)
print_symbol_events(NULL, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
else if (strcmp(argv[i], "sw") == 0 ||
- strcmp(argv[i], "software") == 0)
+ strcmp(argv[i], "software") == 0) {
print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
- else if (strcmp(argv[i], "cache") == 0 ||
+ print_tool_events(NULL, raw_dump);
+ } else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
print_hwcache_events(NULL, raw_dump);
else if (strcmp(argv[i], "pmu") == 0)
@@ -82,9 +83,9 @@ int cmd_list(int argc, const char **argv)
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
else if (strcmp(argv[i], "metric") == 0)
- metricgroup__print(true, false, NULL, raw_dump);
+ metricgroup__print(true, false, NULL, raw_dump, details_flag);
else if (strcmp(argv[i], "metricgroup") == 0)
- metricgroup__print(false, true, NULL, raw_dump);
+ metricgroup__print(false, true, NULL, raw_dump, details_flag);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
@@ -102,7 +103,7 @@ int cmd_list(int argc, const char **argv)
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -113,13 +114,14 @@ int cmd_list(int argc, const char **argv)
event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
print_symbol_events(s, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
+ print_tool_events(s, raw_dump);
print_hwcache_events(s, raw_dump);
print_pmu_events(s, raw_dump, !desc_flag,
long_desc_flag,
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, NULL, raw_dump);
+ metricgroup__print(true, true, s, raw_dump, details_flag);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 6e0189df2b3b..b9810a8d350a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -866,11 +866,9 @@ static int __cmd_report(bool display_info)
.ordered_events = true,
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
session = perf_session__new(&data, false, &eops);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 57393e94d156..f45c8b502f63 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -13,6 +13,7 @@
#include "util/data.h"
#include "util/mem-events.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#define MEM_OPERATION_LOAD 0x1
@@ -238,11 +239,9 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = mem->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int ret;
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de91698de1..8bb124e55c6d 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* builtin-probe.c
*
* Builtin probe command: Set up probe events by C expression
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <sys/utsname.h>
#include <sys/types.h>
@@ -32,6 +18,7 @@
#include "perf.h"
#include "builtin.h"
+#include "namespaces.h"
#include "util/util.h"
#include "util/strlist.h"
#include "util/strfilter.h"
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..e2c3a585a61e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -23,7 +23,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
@@ -39,8 +38,10 @@
#include "util/bpf-loader.h"
#include "util/trigger.h"
#include "util/perf-hooks.h"
+#include "util/cpu-set-sched.h"
#include "util/time-utils.h"
#include "util/units.h"
+#include "util/bpf-event.h"
#include "asm/bug.h"
#include <errno.h>
@@ -61,6 +62,9 @@ struct switch_output {
unsigned long time;
const char *str;
bool set;
+ char **filenames;
+ int num_files;
+ int cur_file;
};
struct record {
@@ -81,12 +85,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -124,6 +133,11 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
return 0;
}
+static int record__aio_enabled(struct record *rec);
+static int record__comp_enabled(struct record *rec);
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+ void *src, size_t src_size);
+
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
void *buf, size_t size, off_t off)
@@ -174,9 +188,9 @@ static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
if (rem_size == 0) {
cblock->aio_fildes = -1;
/*
- * md->refcount is incremented in perf_mmap__push() for
- * every enqueued aio write request so decrement it because
- * the request is now complete.
+ * md->refcount is incremented in record__aio_pushfn() for
+ * every aio write request started in record__aio_push() so
+ * decrement it because the request is now complete.
*/
perf_mmap__put(md);
rc = 1;
@@ -231,18 +245,89 @@ static int record__aio_sync(struct perf_mmap *md, bool sync_all)
} while (1);
}
-static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
+struct record_aio {
+ struct record *rec;
+ void *data;
+ size_t size;
+};
+
+static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size)
{
- struct record *rec = to;
- int ret, trace_fd = rec->session->data->file.fd;
+ struct record_aio *aio = to;
- rec->samples++;
+ /*
+ * map->base data pointed to by buf is copied into a free map->aio.data[] buffer
+ * to release space in the kernel buffer as fast as possible, calling
+ * perf_mmap__consume() from perf_mmap__push() function.
+ *
+ * That lets the kernel proceed with storing more profiling data into
+ * the kernel buffer earlier than other per-cpu kernel buffers are handled.
+ *
+ * Copying can be done in two steps in case the chunk of profiling data
+ * crosses the upper bound of the kernel buffer. In this case we first move
+ * part of the data from map->start till the upper bound and then the remainder
+ * from the beginning of the kernel buffer till the end of the data chunk.
+ */
+
+ if (record__comp_enabled(aio->rec)) {
+ size = zstd_compress(aio->rec->session, aio->data + aio->size,
+ perf_mmap__mmap_len(map) - aio->size,
+ buf, size);
+ } else {
+ memcpy(aio->data + aio->size, buf, size);
+ }
- ret = record__aio_write(cblock, trace_fd, bf, size, off);
+ if (!aio->size) {
+ /*
+ * Increment map->refcount to guard the map->aio.data[] buffer
+ * from premature deallocation, because the map object can be
+ * released before the aio write request started on the
+ * map->aio.data[] buffer completes.
+ *
+ * perf_mmap__put() is done at record__aio_complete()
+ * after started aio request completion or at record__aio_push()
+ * if the request failed to start.
+ */
+ perf_mmap__get(map);
+ }
+
+ aio->size += size;
+
+ return size;
+}
+
+static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off)
+{
+ int ret, idx;
+ int trace_fd = rec->session->data->file.fd;
+ struct record_aio aio = { .rec = rec, .size = 0 };
+
+ /*
+ * Call record__aio_sync() to wait till map->aio.data[] buffer
+ * becomes available after previous aio write operation.
+ */
+
+ idx = record__aio_sync(map, false);
+ aio.data = map->aio.data[idx];
+ ret = perf_mmap__push(map, &aio, record__aio_pushfn);
+ if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
+ return ret;
+
+ rec->samples++;
+ ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
if (!ret) {
- rec->bytes_written += size;
+ *off += aio.size;
+ rec->bytes_written += aio.size;
if (switch_output_size(rec))
trigger_hit(&switch_output_trigger);
+ } else {
+ /*
+ * Decrement map->refcount incremented in record__aio_pushfn()
+ * back if record__aio_write() operation failed to start, otherwise
+ * map->refcount is decremented in record__aio_complete() after
+ * aio write operation finishes successfully.
+ */
+ perf_mmap__put(map);
}
return ret;
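perf_mmap__push() may invoke record__aio_pushfn() twice when the data wraps past the end of the ring buffer, which is why the callback accumulates into aio->size and only takes the refcount on the first chunk. A stand-alone sketch of that two-step wrap-around copy, independent of the perf internals (names and the power-of-two buffer size are assumptions of this sketch):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy 'len' bytes starting at logical offset 'head' out of a circular
     * buffer of size 'buf_len' (a power of two), splitting the memcpy when
     * the range crosses the upper bound -- the same two-step scheme the
     * comment in record__aio_pushfn() describes. Illustrative only. */
    static void ring_copy(void *dst, const uint8_t *ring, size_t buf_len,
                          uint64_t head, size_t len)
    {
        size_t off = head & (buf_len - 1);

        if (off + len > buf_len) {
            size_t first = buf_len - off;

            memcpy(dst, ring + off, first);                    /* up to the bound */
            memcpy((uint8_t *)dst + first, ring, len - first); /* the remainder   */
        } else {
            memcpy(dst, ring + off, len);
        }
    }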
@@ -264,7 +349,7 @@ static void record__aio_mmap_read_sync(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_mmap *maps = evlist->mmap;
- if (!rec->opts.nr_cblocks)
+ if (!record__aio_enabled(rec))
return;
for (i = 0; i < evlist->nr_mmaps; i++) {
@@ -298,13 +383,8 @@ static int record__aio_parse(const struct option *opt,
#else /* HAVE_AIO_SUPPORT */
static int nr_cblocks_max = 0;
-static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
-{
- return -1;
-}
-
-static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
- void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
+static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused,
+ off_t *off __maybe_unused)
{
return -1;
}
@@ -328,6 +408,67 @@ static int record__aio_enabled(struct record *rec)
return rec->opts.nr_cblocks > 0;
}
+#define MMAP_FLUSH_DEFAULT 1
+static int record__mmap_flush_parse(const struct option *opt,
+ const char *str,
+ int unset)
+{
+ int flush_max;
+ struct record_opts *opts = (struct record_opts *)opt->value;
+ static struct parse_tag tags[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+
+ if (unset)
+ return 0;
+
+ if (str) {
+ opts->mmap_flush = parse_tag_value(str, tags);
+ if (opts->mmap_flush == (int)-1)
+ opts->mmap_flush = strtol(str, NULL, 0);
+ }
+
+ if (!opts->mmap_flush)
+ opts->mmap_flush = MMAP_FLUSH_DEFAULT;
+
+ flush_max = perf_evlist__mmap_size(opts->mmap_pages);
+ flush_max /= 4;
+ if (opts->mmap_flush > flush_max)
+ opts->mmap_flush = flush_max;
+
+ return 0;
+}
+
+#ifdef HAVE_ZSTD_SUPPORT
+static unsigned int comp_level_default = 1;
+
+static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = opt->value;
+
+ if (unset) {
+ opts->comp_level = 0;
+ } else {
+ if (str)
+ opts->comp_level = strtol(str, NULL, 0);
+ if (!opts->comp_level)
+ opts->comp_level = comp_level_default;
+ }
+
+ return 0;
+}
+#endif
+static unsigned int comp_level_max = 22;
+
+static int record__comp_enabled(struct record *rec)
+{
+ return rec->opts.comp_level > 0;
+}
+
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -341,6 +482,11 @@ static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size
{
struct record *rec = to;
+ if (record__comp_enabled(rec)) {
+ size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size);
+ bf = map->data;
+ }
+
rec->samples++;
return record__write(rec, map, bf, size);
}
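record__mmap_flush_parse() above accepts a byte count with an optional B/K/M/G suffix and clamps it to a quarter of the mmap buffer, so the flush threshold can never starve the reader. A hedged sketch of that parse-and-clamp logic with a hand-rolled suffix parser (the real code uses parse_tag_value(); the helper below is hypothetical):

    #include <stdlib.h>

    /* Parse "<n>[BKMG]" into bytes and clamp to mmap_len / 4.
     * Simplified stand-in for record__mmap_flush_parse(); illustrative only. */
    static long parse_flush(const char *str, long mmap_len)
    {
        char *end;
        long val = strtol(str, &end, 0);
        long max = mmap_len / 4;

        switch (*end) {
        case 'G': val <<= 10; /* fall through */
        case 'M': val <<= 10; /* fall through */
        case 'K': val <<= 10; /* fall through */
        case 'B': case '\0': break;
        default:  return -1;
        }

        if (val < 1)
            val = 1;        /* MMAP_FLUSH_DEFAULT */
        if (val > max)
            val = max;      /* never more than a quarter of the mmap */
        return val;
    }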
@@ -386,7 +532,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t padding;
u8 pad[8] = {0};
- if (!perf_data__is_pipe(data)) {
+ if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
off_t file_offset;
int fd = perf_data__fd(data);
int err;
@@ -531,9 +677,14 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity,
+ opts->mmap_flush, opts->comp_level) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -566,7 +717,6 @@ static int record__open(struct record *rec)
struct perf_evlist *evlist = rec->evlist;
struct perf_session *session = rec->session;
struct record_opts *opts = &rec->opts;
- struct perf_evsel_config_term *err_term;
int rc = 0;
/*
@@ -619,14 +769,6 @@ try_again:
goto out;
}
- if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- rc = -1;
- goto out;
- }
-
rc = record__mmap(rec);
if (rc)
goto out;
@@ -659,10 +801,9 @@ static int process_sample_event(struct perf_tool *tool,
static int process_buildids(struct record *rec)
{
- struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (data->size == 0)
+ if (perf_data__size(&rec->data) == 0)
return 0;
/*
@@ -722,15 +863,56 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
+}
+
+static size_t process_comp_header(void *record, size_t increment)
+{
+ struct compressed_event *event = record;
+ size_t size = sizeof(*event);
+
+ if (increment) {
+ event->header.size += increment;
+ return increment;
+ }
+
+ event->header.type = PERF_RECORD_COMPRESSED;
+ event->header.size = size;
+
+ return size;
+}
+
+static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
+ void *src, size_t src_size)
+{
+ size_t compressed;
+ size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct compressed_event) - 1;
+
+ compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
+ max_record_size, process_comp_header);
+
+ session->bytes_transferred += src_size;
+ session->bytes_compressed += compressed;
+
+ return compressed;
+}
+
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool overwrite)
+ bool overwrite, bool synch)
{
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
struct perf_mmap *maps;
int trace_fd = rec->data.file.fd;
- off_t off;
+ off_t off = 0;
if (!evlist)
return 0;
@@ -746,27 +928,33 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
off = record__aio_get_pos(trace_fd);
for (i = 0; i < evlist->nr_mmaps; i++) {
+ u64 flush = 0;
struct perf_mmap *map = &maps[i];
if (map->base) {
+ record__adjust_affinity(rec, map);
+ if (synch) {
+ flush = map->flush;
+ map->flush = 1;
+ }
if (!record__aio_enabled(rec)) {
- if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+ if (perf_mmap__push(map, rec, record__pushfn) < 0) {
+ if (synch)
+ map->flush = flush;
rc = -1;
goto out;
}
} else {
- int idx;
- /*
- * Call record__aio_sync() to wait till map->data buffer
- * becomes available after previous aio write request.
- */
- idx = record__aio_sync(map, false);
- if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
+ if (record__aio_push(rec, map, &off) < 0) {
record__aio_set_pos(trace_fd, off);
+ if (synch)
+ map->flush = flush;
rc = -1;
goto out;
}
}
+ if (synch)
+ map->flush = flush;
}
if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
@@ -792,15 +980,15 @@ out:
return rc;
}
-static int record__mmap_read_all(struct record *rec)
+static int record__mmap_read_all(struct record *rec, bool synch)
{
int err;
- err = record__mmap_read_evlist(rec, rec->evlist, false);
+ err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
if (err)
return err;
- return record__mmap_read_evlist(rec, rec->evlist, true);
+ return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
static void record__init_features(struct record *rec)
@@ -826,6 +1014,10 @@ static void record__init_features(struct record *rec)
if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
perf_header__clear_feat(&session->header, HEADER_CLOCKID);
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+ if (!record__comp_enabled(rec))
+ perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
+
perf_header__clear_feat(&session->header, HEADER_STAT);
}
@@ -839,7 +1031,7 @@ record__finish_output(struct record *rec)
return;
rec->session->header.data_size += rec->bytes_written;
- data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -879,6 +1071,7 @@ record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
int fd, err;
+ char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -899,7 +1092,7 @@ record__switch_output(struct record *rec, bool at_exit)
fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
- at_exit);
+ at_exit, &new_filename);
if (fd >= 0 && !at_exit) {
rec->bytes_written = 0;
rec->session->header.data_size = 0;
@@ -907,7 +1100,22 @@ record__switch_output(struct record *rec, bool at_exit)
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- data->file.path, timestamp);
+ data->path, timestamp);
+
+ if (rec->switch_output.num_files) {
+ int n = rec->switch_output.cur_file + 1;
+
+ if (n >= rec->switch_output.num_files)
+ n = 0;
+ rec->switch_output.cur_file = n;
+ if (rec->switch_output.filenames[n]) {
+ remove(rec->switch_output.filenames[n]);
+ free(rec->switch_output.filenames[n]);
+ }
+ rec->switch_output.filenames[n] = new_filename;
+ } else {
+ free(new_filename);
+ }
/* Output tracking events */
if (!at_exit) {
@@ -1082,6 +1290,11 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
+ err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
+ machine, opts);
+ if (err < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
1);
@@ -1100,7 +1313,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
+ struct perf_evlist *sb_evlist = NULL;
int fd;
+ float ratio = 0;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
@@ -1130,6 +1345,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
fd = perf_data__fd(data);
rec->session = session;
+ if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
+ pr_err("Compression initialization failed.\n");
+ return -1;
+ }
+
+ session->header.env.comp_type = PERF_COMP_ZSTD;
+ session->header.env.comp_level = rec->opts.comp_level;
+
record__init_features(rec);
if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
@@ -1159,6 +1382,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
err = -1;
goto out_child;
}
+ session->header.env.comp_mmap_len = session->evlist->mmap_len;
err = bpf__apply_obj_config();
if (err) {
@@ -1200,6 +1424,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
+ if (!opts->no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &session->header.env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
err = record__synthesize(rec, false);
if (err < 0)
goto out_child;
@@ -1294,7 +1526,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (trigger_is_hit(&switch_output_trigger) || done || draining)
perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
- if (record__mmap_read_all(rec) < 0) {
+ if (record__mmap_read_all(rec, false) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
trigger_error(&switch_output_trigger);
err = -1;
@@ -1395,8 +1627,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__synthesize_workload(rec, true);
out_child:
+ record__mmap_read_all(rec, true);
record__aio_mmap_read_sync(rec);
+ if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
+ ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
+ session->header.env.comp_ratio = ratio + 0.5;
+ }
+
if (forks) {
int exit_status;
@@ -1443,13 +1681,23 @@ out_child:
else
samples[0] = '\0';
- fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
+ fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
perf_data__size(data) / 1024.0 / 1024.0,
- data->file.path, postfix, samples);
+ data->path, postfix, samples);
+ if (ratio) {
+ fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
+ rec->session->bytes_transferred / 1024.0 / 1024.0,
+ ratio);
+ }
+ fprintf(stderr, " ]\n");
}
out_delete_session:
+ zstd_fini(&session->zstd_data);
perf_session__delete(session);
+
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
return status;
}
@@ -1639,6 +1887,21 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset || !str)
+ return 0;
+
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+
+ return 0;
+}
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -1782,6 +2045,7 @@ static struct record record = {
.uses_mmap = true,
.default_per_cpu = true,
},
+ .mmap_flush = MMAP_FLUSH_DEFAULT,
},
.tool = {
.sample = process_sample_event,
@@ -1831,7 +2095,7 @@ static struct option __record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.data.file.path, "file",
+ OPT_STRING('o', "output", &record.data.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1839,6 +2103,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1847,6 +2112,9 @@ static struct option __record_options[] = {
OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
"number of mmap data pages and AUX area tracing mmap pages",
record__parse_mmap_pages),
+ OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
+ "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
+ record__mmap_flush_parse),
OPT_BOOLEAN(0, "group", &record.opts.group,
"put the counters into a counter group"),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
@@ -1900,10 +2168,10 @@ static struct option __record_options[] = {
"use per-thread mmaps"),
OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
- " use -I ? to list register names", parse_regs),
+ " use '-I?' to list register names", parse_intr_regs),
OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
- " use -I ? to list register names", parse_regs),
+ " use '--user-regs=?' to list register names", parse_user_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
OPT_CALLBACK('k', "clockid", &record.opts,
@@ -1936,9 +2204,11 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
"Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
- &record.switch_output.set, "signal,size,time",
- "Switch output when receive SIGUSR2 or cross size,time threshold",
+ &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
+ "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
+ OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
+ "Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
#ifdef HAVE_AIO_SUPPORT
@@ -1946,6 +2216,14 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
+#ifdef HAVE_ZSTD_SUPPORT
+ OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
+ "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
+ record__parse_comp_level),
+#endif
OPT_END()
};
@@ -1980,6 +2258,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2002,6 +2283,12 @@ int cmd_record(int argc, const char **argv)
"cgroup monitoring only available in system-wide mode");
}
+
+ if (rec->opts.comp_level != 0) {
+ pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
+ rec->no_buildid = true;
+ }
+
if (rec->opts.record_switch_events &&
!perf_can_record_switch_events()) {
ui__error("kernel does not support recording context switch events\n");
@@ -2019,6 +2306,13 @@ int cmd_record(int argc, const char **argv)
alarm(rec->switch_output.time);
}
+ if (rec->switch_output.num_files) {
+ rec->switch_output.filenames = calloc(sizeof(char *),
+ rec->switch_output.num_files);
+ if (!rec->switch_output.filenames)
+ return -EINVAL;
+ }
+
/*
* Allow aliases to facilitate the lookup of symbols for address
* filters. Refer to auxtrace_parse_filters().
@@ -2140,8 +2434,14 @@ int cmd_record(int argc, const char **argv)
if (rec->opts.nr_cblocks > nr_cblocks_max)
rec->opts.nr_cblocks = nr_cblocks_max;
- if (verbose > 0)
- pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+ pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
+
+ if (rec->opts.comp_level > comp_level_max)
+ rec->opts.comp_level = comp_level_max;
+ pr_debug("comp level: %d\n", rec->opts.comp_level);
err = __cmd_record(&record, argc, argv);
out:
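The record changes above report a compression ratio as transferred bytes over compressed bytes and store it in the perf.data header rounded to the nearest integer (ratio + 0.5). A minimal sketch of that computation and the summary line it feeds; the byte counts here are made up for illustration:

    #include <stdio.h>

    /* Compute and print the zstd trace compression summary the way
     * __cmd_record() does at exit. */
    int main(void)
    {
        unsigned long long transferred = 48 << 20;  /* raw bytes handed to zstd */
        unsigned long long compressed  = 12 << 20;  /* bytes actually written   */
        float ratio = 0;

        if (transferred && compressed)
            ratio = (float)transferred / (float)compressed;

        if (ratio)
            printf(", compressed (original %.3f MB, ratio is %.3f)\n",
                   transferred / 1024.0 / 1024.0, ratio);

        /* The perf.data header stores it as an integer, rounded: */
        printf("comp_ratio stored in header: %u\n", (unsigned)(ratio + 0.5));
        return 0;
    }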
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 4958095be4fc..1ca533f06a4c 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include "util/map.h"
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -46,9 +47,11 @@
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
+#include "sane_ctype.h"
#include <signal.h>
#include <linux/bitmap.h>
#include <linux/stringify.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -133,9 +136,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
- hist__account_cycles(sample->branch_stack, al, sample,
- rep->nonany_branch_mode);
-
if (sort__mode == SORT_MODE__BRANCH) {
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
@@ -178,9 +178,6 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
- hist__account_cycles(sample->branch_stack, al, sample,
- rep->nonany_branch_mode);
-
bi = he->branch_info;
err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
if (err)
@@ -279,6 +276,11 @@ static int process_sample_event(struct perf_tool *tool,
if (al.map != NULL)
al.map->dso->hit = 1;
+ if (ui__has_annotation() || rep->symbol_ipc) {
+ hist__account_cycles(sample->branch_stack, &al, sample,
+ rep->nonany_branch_mode);
+ }
+
ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
@@ -615,6 +617,21 @@ static int report__collapse_hists(struct report *rep)
return ret;
}
+static int hists__resort_cb(struct hist_entry *he, void *arg)
+{
+ struct report *rep = arg;
+ struct symbol *sym = he->ms.sym;
+
+ if (rep->symbol_ipc && sym && !sym->annotate2) {
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
+
+ symbol__annotate2(sym, he->ms.map, evsel,
+ &annotation__default_options, NULL);
+ }
+
+ return 0;
+}
+
static void report__output_resort(struct report *rep)
{
struct ui_progress prog;
@@ -622,8 +639,10 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
- evlist__for_each_entry(rep->session->evlist, pos)
- perf_evsel__output_resort(pos, &prog);
+ evlist__for_each_entry(rep->session->evlist, pos) {
+ perf_evsel__output_resort_cb(pos, &prog,
+ hists__resort_cb, rep);
+ }
ui_progress__finish();
}
@@ -753,7 +772,8 @@ static int tasks_print(struct report *rep, FILE *fp)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
task = tasks + itask++;
task->thread = rb_entry(nd, struct thread, rb_node);
@@ -880,7 +900,7 @@ static int __cmd_report(struct report *rep)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
- ui__error("The %s file has no samples!\n", data->file.path);
+ ui__error("The %s data has no samples!\n", data->path);
return 0;
}
@@ -907,6 +927,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
return parse_callchain_report_opt(arg);
}
+static int
+parse_time_quantum(const struct option *opt, const char *arg,
+ int unset __maybe_unused)
+{
+ unsigned long *time_q = opt->value;
+ char *end;
+
+ *time_q = strtoul(arg, &end, 0);
+ if (end == arg)
+ goto parse_err;
+ if (*time_q == 0) {
+ pr_err("time quantum cannot be 0");
+ return -1;
+ }
+ while (isspace(*end))
+ end++;
+ if (*end == 0)
+ return 0;
+ if (!strcmp(end, "s")) {
+ *time_q *= NSEC_PER_SEC;
+ return 0;
+ }
+ if (!strcmp(end, "ms")) {
+ *time_q *= NSEC_PER_MSEC;
+ return 0;
+ }
+ if (!strcmp(end, "us")) {
+ *time_q *= NSEC_PER_USEC;
+ return 0;
+ }
+ if (!strcmp(end, "ns"))
+ return 0;
+parse_err:
+ pr_err("Cannot parse time quantum `%s'\n", arg);
+ return -1;
+}
+
int
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
const char *arg, int unset __maybe_unused)
@@ -956,9 +1013,9 @@ int cmd_report(int argc, const char **argv)
int branch_mode = -1;
bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
- const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
- CALLCHAIN_REPORT_HELP
- "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+ static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
"perf report [<options>]",
@@ -1025,10 +1082,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
- " Please refer the man page for the complete list."),
+ sort_help("sort by key(s):")),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- "output field(s): overhead, period, sample plus all of sort keys"),
+ sort_help("output field(s): overhead period sample ")),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1101,6 +1157,8 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
+ OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
+ "Number of samples to save per histogram entry for individual browsing"),
OPT_CALLBACK(0, "percent-limit", &report, "percent",
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1128,6 +1186,10 @@ int cmd_report(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
+ OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
+ "Set time quantum for time sort key (default 100ms)",
+ parse_time_quantum),
OPT_END()
};
struct perf_data data = {
@@ -1188,14 +1250,17 @@ int cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
repeat:
session = perf_session__new(&data, false, &report.tool);
if (session == NULL)
return -1;
+ if (zstd_init(&(session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
if (report.queue_size) {
ordered_events__set_alloc_size(&session->ordered_events,
report.queue_size);
@@ -1356,36 +1421,13 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- report.ptime_range = perf_time__range_alloc(report.time_str,
- &report.range_size);
- if (!report.ptime_range) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- ret = -EINVAL;
- goto error;
- }
-
- report.range_num = perf_time__percent_parse_str(
- report.ptime_range, report.range_size,
- report.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (report.range_num < 0) {
- pr_err("Invalid time string\n");
- ret = -EINVAL;
+ if (report.time_str) {
+ ret = perf_time__parse_for_ranges(report.time_str, session,
+ &report.ptime_range,
+ &report.range_size,
+ &report.range_num);
+ if (ret < 0)
goto error;
- }
- } else {
- report.range_num = 1;
}
if (session->tevent.pevent &&
@@ -1407,8 +1449,9 @@ repeat:
ret = 0;
error:
- zfree(&report.ptime_range);
-
+ if (report.ptime_range)
+ zfree(&report.ptime_range);
+ zstd_fini(&(session->zstd_data));
perf_session__delete(session);
return ret;
}
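parse_time_quantum() above converts a count with an s/ms/us/ns suffix into nanoseconds; the new time sort key then buckets sample timestamps by that quantum. A hedged sketch of the two steps together (the bucketing formula is assumed from the option description, it is not part of this hunk):

    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL
    #define NSEC_PER_MSEC 1000000ULL
    #define NSEC_PER_SEC  1000000000ULL

    int main(void)
    {
        unsigned long long quantum = 100 * NSEC_PER_MSEC; /* default 100ms      */
        unsigned long long sample_time = 5123456789ULL;   /* ns, made-up sample */

        /* Bucket a sample's timestamp to the start of its quantum. */
        unsigned long long bucket = sample_time - (sample_time % quantum);

        printf("sample at %llu ns falls into the %llu ns bucket\n",
               sample_time, bucket);
        return 0;
    }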
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cbf39dab19c1..275f2d92a7bf 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -213,7 +213,7 @@ struct perf_sched {
u64 all_runtime;
u64 all_count;
u64 cpu_last_switched[MAX_CPUS];
- struct rb_root atom_root, sorted_atom_root, merged_atom_root;
+ struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
struct list_head sort_list, cmp_pid;
bool force;
bool skip_merge;
@@ -271,7 +271,7 @@ struct evsel_runtime {
struct idle_thread_runtime {
struct thread_runtime tr;
struct thread *last_thread;
- struct rb_root sorted_root;
+ struct rb_root_cached sorted_root;
struct callchain_root callchain;
struct callchain_cursor cursor;
};
@@ -950,10 +950,10 @@ thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *
}
static struct work_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread,
+thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
struct list_head *sort_list)
{
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = root->rb_root.rb_node;
struct work_atoms key = { .thread = thread };
while (node) {
@@ -976,10 +976,11 @@ thread_atoms_search(struct rb_root *root, struct thread *thread,
}
static void
-__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
+__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
struct list_head *sort_list)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
while (*new) {
struct work_atoms *this;
@@ -992,12 +993,14 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
if (cmp > 0)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
@@ -1447,15 +1450,15 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
static void perf_sched__sort_lat(struct perf_sched *sched)
{
struct rb_node *node;
- struct rb_root *root = &sched->atom_root;
+ struct rb_root_cached *root = &sched->atom_root;
again:
for (;;) {
struct work_atoms *data;
- node = rb_first(root);
+ node = rb_first_cached(root);
if (!node)
break;
- rb_erase(node, root);
+ rb_erase_cached(node, root);
data = rb_entry(node, struct work_atoms, node);
__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
}
@@ -1782,11 +1785,9 @@ static int perf_sched__read_events(struct perf_sched *sched)
};
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
int rc = -1;
@@ -2762,12 +2763,12 @@ static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
return ret;
}
-static size_t timehist_print_idlehist_callchain(struct rb_root *root)
+static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
{
size_t ret = 0;
FILE *fp = stdout;
struct callchain_node *chain;
- struct rb_node *rb_node = rb_first(root);
+ struct rb_node *rb_node = rb_first_cached(root);
printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
@@ -2868,7 +2869,7 @@ static void timehist_print_summary(struct perf_sched *sched,
if (itr == NULL)
continue;
- callchain_param.sort(&itr->sorted_root, &itr->callchain,
+ callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
0, &callchain_param);
printf(" CPU %2d:", i);
@@ -2955,11 +2956,9 @@ static int perf_sched__timehist(struct perf_sched *sched)
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
struct perf_session *session;
@@ -3074,11 +3073,12 @@ static void print_bad_events(struct perf_sched *sched)
}
}
-static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
+static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
struct work_atoms *this;
const char *comm = thread__comm_str(data->thread), *this_comm;
+ bool leftmost = true;
while (*new) {
int cmp;
@@ -3092,6 +3092,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
new = &((*new)->rb_left);
} else if (cmp < 0) {
new = &((*new)->rb_right);
+ leftmost = false;
} else {
this->num_merged++;
this->total_runtime += data->total_runtime;
@@ -3109,7 +3110,7 @@ static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
data->num_merged++;
rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
+ rb_insert_color_cached(&data->node, root, leftmost);
}
static void perf_sched__merge_lat(struct perf_sched *sched)
@@ -3120,8 +3121,8 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
if (sched->skip_merge)
return;
- while ((node = rb_first(&sched->atom_root))) {
- rb_erase(node, &sched->atom_root);
+ while ((node = rb_first_cached(&sched->atom_root))) {
+ rb_erase_cached(node, &sched->atom_root);
data = rb_entry(node, struct work_atoms, node);
__merge_work_atoms(&sched->merged_atom_root, data);
}
@@ -3143,7 +3144,7 @@ static int perf_sched__lat(struct perf_sched *sched)
printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
printf(" -----------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sched->sorted_atom_root);
+ next = rb_first_cached(&sched->sorted_atom_root);
while (next) {
struct work_atoms *work_list;
@@ -3336,7 +3337,7 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv)
{
- const char default_sort_order[] = "avg, max, switch, runtime";
+ static const char default_sort_order[] = "avg, max, switch, runtime";
struct perf_sched sched = {
.tool = {
.sample = perf_sched__process_tracepoint_sample,
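The sched changes above swap plain rbtrees for the cached variant, which keeps a pointer to the leftmost node so rb_first_cached() is O(1); the insert paths therefore track a leftmost flag while descending and pass it to rb_insert_color_cached(). A toy stand-alone sketch of the same caching idea, using an unbalanced binary search tree instead of the kernel rbtree API:

    #include <stdio.h>

    /* Toy BST that caches its leftmost (minimum) node, mirroring the
     * rb_root_cached idea: the cache is updated during insert by noting
     * whether we ever took a right turn. Illustrative only, no balancing. */
    struct node { long key; struct node *left, *right; };
    struct root_cached { struct node *root, *leftmost; };

    static void insert_cached(struct root_cached *tree, struct node *n)
    {
        struct node **link = &tree->root;
        int leftmost = 1;

        while (*link) {
            if (n->key < (*link)->key) {
                link = &(*link)->left;
            } else {
                link = &(*link)->right;
                leftmost = 0;   /* took a right turn: not the new minimum */
            }
        }
        *link = n;
        if (leftmost)
            tree->leftmost = n; /* O(1) "rb_first_cached" from now on */
    }

    int main(void)
    {
        struct root_cached tree = { 0 };
        struct node a = { .key = 5 }, b = { .key = 2 }, c = { .key = 9 };

        insert_cached(&tree, &a);
        insert_cached(&tree, &b);
        insert_cached(&tree, &c);
        printf("leftmost key: %ld\n", tree.leftmost->key);  /* prints 2 */
        return 0;
    }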
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index ac221f137ed2..61cfd8f70989 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -10,6 +10,7 @@
#include "util/perf_regs.h"
#include "util/session.h"
#include "util/tool.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
@@ -28,10 +29,12 @@
#include "util/time-utils.h"
#include "util/path.h"
#include "print_binary.h"
+#include "archinsn.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <sys/utsname.h>
#include "asm/bug.h"
#include "util/mem-events.h"
#include "util/dump-insn.h"
@@ -50,6 +53,8 @@
static char const *script_name;
static char const *generate_script_lang;
+static bool reltime;
+static u64 initial_time;
static bool debug_mode;
static u64 last_timestamp;
static u64 nr_unordered;
@@ -57,11 +62,11 @@ static bool no_callchain;
static bool latency_format;
static bool system_wide;
static bool print_flags;
-static bool nanosecs;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
+static bool native_arch;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -148,6 +153,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
+ u64 user_set_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -344,7 +350,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
if (attr->sample_type & sample_type)
return 0;
- if (output[type].user_set) {
+ if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
evname = perf_evsel__name(evsel);
@@ -682,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
}
if (PRINT_FIELD(TIME)) {
- nsecs = sample->time;
+ u64 t = sample->time;
+ if (reltime) {
+ if (!initial_time)
+ initial_time = sample->time;
+ t = sample->time - initial_time;
+ }
+ nsecs = t;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
- if (nanosecs)
+ if (symbol_conf.nanosecs)
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
- timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+ timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
printed += fprintf(fp, "%12s: ", sample_time);
}
}
@@ -1225,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
return len + dlen;
}
+__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
+ struct thread *thread __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1232,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
- if (PRINT_FIELD(INSN)) {
+ if (PRINT_FIELD(INSN) && sample->insn_len) {
int i;
printed += fprintf(fp, " insn:");
@@ -1920,6 +1941,13 @@ static int cleanup_scripting(void)
return scripting_ops ? scripting_ops->stop_script() : 0;
}
+static bool filter_cpu(struct perf_sample *sample)
+{
+ if (cpu_list)
+ return !test_bit(sample->cpu, cpu_bitmap);
+ return false;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -1954,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
if (al.filtered)
goto out_put;
- if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
+ if (filter_cpu(sample))
goto out_put;
if (scripting_ops)
@@ -2039,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
PERF_RECORD_COMM, stdout);
- perf_event__fprintf(event, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2075,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_NAMESPACES, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
+ perf_event__fprintf(event, stdout);
+ }
ret = 0;
out:
thread__put(thread);
@@ -2109,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_FORK, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
@@ -2139,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_EXIT, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
+ perf_event__fprintf(event, stdout);
+ }
if (perf_event__process_exit(tool, event, sample, machine) < 0)
err = -1;
@@ -2175,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2207,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP2, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2234,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_SWITCH, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2257,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
if (thread == NULL)
return -1;
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_LOST, stdout);
- perf_event__fprintf(event, stdout);
+ if (!filter_cpu(sample)) {
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
+ perf_event__fprintf(event, stdout);
+ }
thread__put(thread);
return 0;
}
@@ -2559,6 +2603,10 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
pr_warning("Overriding previous field request for %s events.\n",
event_type(type));
+ /* Don't override defaults for +- */
+ if (strchr(tok, '+') || strchr(tok, '-'))
+ goto parse;
+
output[type].fields = 0;
output[type].user_set = true;
output[type].wildcard_set = false;
@@ -2627,10 +2675,13 @@ parse:
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else {
- if (change == REMOVE)
+ if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
- else
+ output[j].user_set_fields &= ~all_output_options[i].field;
+ } else {
output[j].fields |= all_output_options[i].field;
+ output[j].user_set_fields |= all_output_options[i].field;
+ }
output[j].user_set = true;
output[j].wildcard_set = true;
}
@@ -2643,6 +2694,10 @@ parse:
rc = -EINVAL;
goto out;
}
+ if (change == REMOVE)
+ output[type].fields &= ~all_output_options[i].field;
+ else
+ output[type].fields |= all_output_options[i].field;
output[type].user_set = true;
output[type].wildcard_set = true;
}
@@ -2935,17 +2990,16 @@ static int check_ev_match(char *dir_name, char *scriptname,
* will list all statically runnable scripts, select one, execute it and
* show the output in a perf browser.
*/
-int find_scripts(char **scripts_array, char **scripts_path_array)
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen)
{
struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
@@ -2982,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
/* Skip those real time scripts: xxxtop.p[yl] */
if (strstr(script_dirent->d_name, "top."))
continue;
- sprintf(scripts_path_array[i], "%s/%s", lang_path,
+ if (i >= num)
+ break;
+ snprintf(scripts_path_array[i], pathlen, "%s/%s",
+ lang_path,
script_dirent->d_name);
temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
@@ -3221,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "+insn,-event,-period", 0);
itrace_parse_synth_opts(opt, "i0ns", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3239,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
itrace_parse_synth_opts(opt, "cewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3249,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
{
parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
itrace_parse_synth_opts(opt, "crewp", 0);
- nanosecs = true;
+ symbol_conf.nanosecs = true;
return 0;
}
@@ -3266,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
.set = false,
.default_no_sample = true,
};
+ struct utsname uts;
char *script_path = NULL;
const char **__argv;
int i, j, err = 0;
@@ -3363,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
OPT_BOOLEAN('I', "show-info", &show_full_info,
"display extended information from perf.data file"),
OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3384,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_INTEGER(0, "max-blocks", &max_blocks,
"Maximum number of code blocks to dump with brstackinsn"),
- OPT_BOOLEAN(0, "ns", &nanosecs,
+ OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
"Use 9 decimal places when displaying time"),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
@@ -3418,8 +3477,8 @@ int cmd_script(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- data.file.path = input_name;
- data.force = symbol_conf.force;
+ data.path = input_name;
+ data.force = symbol_conf.force;
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -3437,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
}
}
+ if (script.time_str && reltime) {
+ fprintf(stderr, "Don't combine --reltime with --time\n");
+ return -1;
+ }
+
if (itrace_synth_opts.callchain &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3604,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
if (symbol__init(&session->header.env) < 0)
goto out_delete;
+ uname(&uts);
+ if (!strcmp(uts.machine, session->header.env.arch) ||
+ (!strcmp(uts.machine, "x86_64") &&
+ !strcmp(session->header.env.arch, "i386")))
+ native_arch = true;
+
script.session = session;
script__setup_sample_type(&script);
@@ -3645,7 +3715,7 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
- input = open(data.file.path, O_RDONLY); /* input_name */
+ input = open(data.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
@@ -3688,37 +3758,13 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- script.ptime_range = perf_time__range_alloc(script.time_str,
- &script.range_size);
- if (!script.ptime_range) {
- err = -ENOMEM;
- goto out_delete;
- }
-
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
- if (session->evlist->first_sample_time == 0 &&
- session->evlist->last_sample_time == 0) {
- pr_err("HINT: no first/last sample time found in perf data.\n"
- "Please use latest perf binary to execute 'perf record'\n"
- "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
- err = -EINVAL;
- goto out_delete;
- }
-
- script.range_num = perf_time__percent_parse_str(
- script.ptime_range, script.range_size,
- script.time_str,
- session->evlist->first_sample_time,
- session->evlist->last_sample_time);
-
- if (script.range_num < 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ if (script.time_str) {
+ err = perf_time__parse_for_ranges(script.time_str, session,
+ &script.ptime_range,
+ &script.range_size,
+ &script.range_num);
+ if (err < 0)
goto out_delete;
- }
- } else {
- script.range_num = 1;
}
err = __cmd_script(&script);
@@ -3726,7 +3772,8 @@ int cmd_script(int argc, const char **argv)
flush_scripting();
out_delete:
- zfree(&script.ptime_range);
+ if (script.ptime_range)
+ zfree(&script.ptime_range);
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
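The --reltime change above captures the first sample's timestamp and prints every later one relative to it, in either usec or nsec resolution depending on --ns. A small stand-alone sketch of that formatting logic; the field widths are copied from the hunk, the usec path is simplified (the real code goes through timestamp__scnprintf_usec()):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    static unsigned long long initial_time;

    /* Mimic perf_sample__fprintf_start()'s time handling under --reltime. */
    static void print_sample_time(unsigned long long sample_time, int nanosecs)
    {
        unsigned long long t, secs, nsecs;

        if (!initial_time)
            initial_time = sample_time;     /* first sample defines zero */
        t = sample_time - initial_time;

        secs  = t / NSEC_PER_SEC;
        nsecs = t % NSEC_PER_SEC;

        if (nanosecs)
            printf("%5llu.%09llu: ", secs, nsecs);
        else
            printf("%12.6f: ", t / (double)NSEC_PER_SEC);
    }

    int main(void)
    {
        print_sample_time(1000000000ULL, 1);   /* 0.000000000: */
        print_sample_time(1500000500ULL, 1);   /* 0.500000500: */
        putchar('\n');
        return 0;
    }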
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 63a3afc7f32b..1ae66f09dc7d 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-stat.c
*
@@ -37,8 +38,6 @@
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
* Jaswinder Singh Rajput <jaswinder@kernel.org>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "perf.h"
@@ -52,7 +51,6 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
-#include "util/drv_configs.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
@@ -83,7 +81,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <sys/resource.h>
-#include <sys/wait.h>
#include "sane_ctype.h"
@@ -246,11 +243,25 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
process_synthesized_event, NULL);
}
+static int read_single_counter(struct perf_evsel *counter, int cpu,
+ int thread, struct timespec *rs)
+{
+ if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
+ u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu, thread);
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ return perf_evsel__read_counter(counter, cpu, thread);
+}
+
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
-static int read_counter(struct perf_evsel *counter)
+static int read_counter(struct perf_evsel *counter, struct timespec *rs)
{
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
@@ -277,7 +288,7 @@ static int read_counter(struct perf_evsel *counter)
 * (via perf_evsel__read_counter) and sets their count->loaded.
*/
if (!count->loaded &&
- perf_evsel__read_counter(counter, cpu, thread)) {
+ read_single_counter(counter, cpu, thread, rs)) {
counter->counts->scaled = -1;
perf_counts(counter->counts, cpu, thread)->ena = 0;
perf_counts(counter->counts, cpu, thread)->run = 0;
@@ -306,13 +317,13 @@ static int read_counter(struct perf_evsel *counter)
return 0;
}
-static void read_counters(void)
+static void read_counters(struct timespec *rs)
{
struct perf_evsel *counter;
int ret;
evlist__for_each_entry(evsel_list, counter) {
- ret = read_counter(counter);
+ ret = read_counter(counter, rs);
if (ret)
pr_debug("failed to read counter %s\n", counter->name);
@@ -325,11 +336,11 @@ static void process_interval(void)
{
struct timespec ts, rs;
- read_counters();
-
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
+ read_counters(&rs);
+
if (STAT_RECORD) {
if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
pr_err("failed to write stat round event\n");
@@ -418,7 +429,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
- struct perf_evsel_config_term *err_term;
if (interval) {
ts.tv_sec = interval / USEC_PER_MSEC;
@@ -515,13 +525,6 @@ try_again:
return -1;
}
- if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(counter), errno,
- str_error_r(errno, msg, sizeof(msg)));
- return -1;
- }
-
if (STAT_RECORD) {
int err, fd = perf_data__fd(&perf_stat.data);
@@ -603,7 +606,7 @@ try_again:
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
- read_counters();
+ read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
perf_evlist__close(evsel_list);
return WEXITSTATUS(status);
@@ -728,7 +731,8 @@ static struct option stat_options[] = {
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
- OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
+ OPT_BOOLEAN(0, "scale", &stat_config.scale,
+ "Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &stat_config.run_count,
@@ -842,6 +846,18 @@ static int perf_stat__get_core_cached(struct perf_stat_config *config,
return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
}
+static bool term_percore_set(void)
+{
+ struct perf_evsel *counter;
+
+ evlist__for_each_entry(evsel_list, counter) {
+ if (counter->percore)
+ return true;
+ }
+
+ return false;
+}
+
static int perf_stat_init_aggr_mode(void)
{
int nr;
@@ -862,6 +878,15 @@ static int perf_stat_init_aggr_mode(void)
stat_config.aggr_get_id = perf_stat__get_core_cached;
break;
case AGGR_NONE:
+ if (term_percore_set()) {
+ if (cpu_map__build_core_map(evsel_list->cpus,
+ &stat_config.aggr_map)) {
+ perror("cannot build core map");
+ return -1;
+ }
+ stat_config.aggr_get_id = perf_stat__get_core_cached;
+ }
+ break;
case AGGR_GLOBAL:
case AGGR_THREAD:
case AGGR_UNSET:
@@ -1317,6 +1342,7 @@ static void init_features(struct perf_session *session)
for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
perf_header__set_feat(&session->header, feat);
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
@@ -1332,7 +1358,7 @@ static int __cmd_record(int argc, const char **argv)
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
- data->file.path = output_name;
+ data->path = output_name;
if (stat_config.run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
@@ -1533,8 +1559,8 @@ static int __cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- perf_stat.data.file.path = input_name;
- perf_stat.data.mode = PERF_DATA_MODE_READ;
+ perf_stat.data.path = input_name;
+ perf_stat.data.mode = PERF_DATA_MODE_READ;
session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
if (session == NULL)
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 775b99833e51..145a19668114 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-timechart.c - make an svg timechart of system activity
*
@@ -5,11 +6,6 @@
*
* Authors:
* Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#include <errno.h>
@@ -1602,11 +1598,9 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{ "syscalls:sys_exit_select", process_exit_poll },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = tchart->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
struct perf_session *session = perf_session__new(&data, false,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index f64e312db787..466621cd1017 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-top.c
*
@@ -14,21 +15,20 @@
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "builtin.h"
#include "perf.h"
#include "util/annotate.h"
+#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
-#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
+#include "util/map.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
@@ -366,7 +366,7 @@ static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
if (p)
*p = 0;
- next = rb_first(&hists->entries);
+ next = rb_first_cached(&hists->entries);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
@@ -1184,37 +1184,35 @@ static void init_process_thread(struct perf_top *top)
static int __cmd_top(struct perf_top *top)
{
- char msg[512];
- struct perf_evsel *pos;
- struct perf_evsel_config_term *err_term;
- struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
pthread_t thread, thread_process;
int ret;
- top->session = perf_session__new(NULL, false, NULL);
- if (top->session == NULL)
- return -1;
-
if (!top->annotation_opts.objdump_path) {
ret = perf_env__lookup_objdump(&top->session->header.env,
&top->annotation_opts.objdump_path);
if (ret)
- goto out_delete;
+ return ret;
}
ret = callchain_param__setup_sample_type(&callchain_param);
if (ret)
- goto out_delete;
+ return ret;
if (perf_session__register_idle_thread(top->session) < 0)
- goto out_delete;
+ return ret;
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
init_process_thread(top);
+ ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
+ &top->session->machines.host,
+ &top->record_opts);
+ if (ret < 0)
+ pr_warning("Couldn't synthesize bpf events.\n");
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false,
top->nr_threads_synthesize);
@@ -1224,21 +1222,18 @@ static int __cmd_top(struct perf_top *top)
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
- if (ret < 0)
- goto out_err_cpu_topo;
+ if (ret < 0) {
+ char errbuf[BUFSIZ];
+ const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
+
+ ui__error("Could not read the CPU topology map: %s\n", err);
+ return ret;
+ }
}
ret = perf_top__start_counters(top);
if (ret)
- goto out_delete;
-
- ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
- if (ret) {
- pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
- err_term->val.drv_cfg, perf_evsel__name(pos), errno,
- str_error_r(errno, msg, sizeof(msg)));
- goto out_delete;
- }
+ return ret;
top->session->evlist = top->evlist;
perf_session__set_id_hdr_size(top->session);
@@ -1257,7 +1252,7 @@ static int __cmd_top(struct perf_top *top)
ret = -1;
if (pthread_create(&thread_process, NULL, process_thread, top)) {
ui__error("Could not create process thread.\n");
- goto out_delete;
+ return ret;
}
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1301,19 +1296,7 @@ out_join:
out_join_thread:
pthread_cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
-out_delete:
- perf_session__delete(top->session);
- top->session = NULL;
-
return ret;
-
-out_err_cpu_topo: {
- char errbuf[BUFSIZ];
- const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
-
- ui__error("Could not read the CPU topology map: %s\n", err);
- goto out_delete;
-}
}
static int
@@ -1393,6 +1376,7 @@ int cmd_top(int argc, const char **argv)
* */
.overwrite = 0,
.sample_time = true,
+ .sample_time_set = true,
},
.max_stack = sysctl__max_stack(),
.annotation_opts = annotation__default_options,
@@ -1485,6 +1469,7 @@ int cmd_top(int argc, const char **argv)
"Display raw encoding of assembly instructions (default)"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
+ OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1516,6 +1501,7 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_END()
};
+ struct perf_evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@@ -1633,8 +1619,9 @@ int cmd_top(int argc, const char **argv)
annotation_config__init();
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
- if (symbol__init(NULL) < 0)
- return -1;
+ status = symbol__init(NULL);
+ if (status < 0)
+ goto out_delete_evlist;
sort__setup_elide(stdout);
@@ -1644,10 +1631,28 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
+ top.session = perf_session__new(NULL, false, NULL);
+ if (top.session == NULL) {
+ status = -1;
+ goto out_delete_evlist;
+ }
+
+ if (!top.record_opts.no_bpf_event)
+ bpf_event__add_sb_event(&sb_evlist, &perf_env);
+
+ if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
status = __cmd_top(&top);
+ if (!opts->no_bpf_event)
+ perf_evlist__stop_sb_thread(sb_evlist);
+
out_delete_evlist:
perf_evlist__delete(top.evlist);
+ perf_session__delete(top.session);
return status;
}
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index b36061cd1ab8..52fadc858ef0 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* builtin-trace.c
*
@@ -12,13 +13,12 @@
* Initially based on the 'trace' prototype by Thomas Gleixner:
*
* http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
+#include "util/bpf_map.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -29,6 +29,8 @@
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
@@ -85,6 +87,9 @@ struct trace {
*augmented;
} events;
} syscalls;
+ struct {
+ struct bpf_map *map;
+ } dump;
struct record_opts opts;
struct perf_evlist *evlist;
struct machine *host;
@@ -1039,6 +1044,9 @@ static const size_t trace__entry_str_size = 2048;
static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
+ if (fd < 0)
+ return NULL;
+
if (fd > ttrace->files.max) {
struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
@@ -2766,7 +2774,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
if (parent == NULL)
break;
- if (!strcmp(thread__comm_str(parent), "sshd")) {
+ if (!strcmp(thread__comm_str(parent), "sshd") ||
+ strstarts(thread__comm_str(parent), "gnome-terminal")) {
pids[nr++] = parent->tid;
break;
}
@@ -2991,6 +3000,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
+ if (trace->dump.map)
+ bpf_map__fprintf(trace->dump.map, trace->output);
+
err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -3141,11 +3153,9 @@ static int trace__replay(struct trace *trace)
{ "probe:vfs_getname", trace__vfs_getname, },
};
struct perf_data data = {
- .file = {
- .path = input_name,
- },
- .mode = PERF_DATA_MODE_READ,
- .force = trace->force,
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -3680,6 +3690,7 @@ int cmd_trace(int argc, const char **argv)
.max_stack = UINT_MAX,
.max_events = ULONG_MAX,
};
+ const char *map_dump_str = NULL;
const char *output_name = NULL;
const struct option trace_options[] = {
OPT_CALLBACK('e', "event", &trace, "event",
@@ -3712,6 +3723,9 @@ int cmd_trace(int argc, const char **argv)
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
+#ifdef HAVE_LIBBPF_SUPPORT
+ OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
+#endif
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_BOOLEAN('T', "time", &trace.full_time,
@@ -3806,6 +3820,14 @@ int cmd_trace(int argc, const char **argv)
err = -1;
+ if (map_dump_str) {
+ trace.dump.map = bpf__find_map_by_name(map_dump_str);
+ if (trace.dump.map == NULL) {
+ pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
+ goto out;
+ }
+ }
+
if (trace.trace_pgfaults) {
trace.opts.sample_address = true;
trace.opts.sample_time = true;
@@ -3865,7 +3887,8 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
- if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ if (trace.syscalls.events.augmented->priv == NULL &&
+ strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
struct perf_evsel *augmented = trace.syscalls.events.augmented;
if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
perf_evsel__init_augmented_syscall_tp_args(augmented))
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 50df168be326..f470144d1a70 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -78,6 +78,8 @@ static void library_status(void)
STATUS(HAVE_LZMA_SUPPORT, lzma);
STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid);
STATUS(HAVE_LIBBPF_SUPPORT, bpf);
+ STATUS(HAVE_AIO_SUPPORT, aio);
+ STATUS(HAVE_ZSTD_SUPPORT, zstd);
}
int cmd_version(int argc, const char **argv)
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 05745f3ce912..999fe9170122 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
int cmd_data(int argc, const char **argv);
int cmd_ftrace(int argc, const char **argv);
-int find_scripts(char **scripts_array, char **scripts_path_array);
+int find_scripts(char **scripts_array, char **scripts_path_array, int num,
+ int pathlen);
#endif
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 7b55613924de..c68ee06cae63 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -103,7 +103,7 @@ done
# diff with extra ignore lines
check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
# diff non-symmetric files
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index a28dca2582aa..0453ba26cdbd 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -222,6 +222,10 @@ The 'exclude_user', 'exclude_kernel' and 'exclude_hv' bits provide a
way to request that counting of events be restricted to times when the
CPU is in user, kernel and/or hypervisor mode.
+Furthermore the 'exclude_host' and 'exclude_guest' bits provide a way
+to request counting of events restricted to guest and host contexts when
+using Linux as the hypervisor.
+
The 'mmap' and 'munmap' bits allow recording of PROT_EXEC mmap/munmap
operations, these can be used to relate userspace IP addresses to actual
code, even after the mapping (or even the whole process) is gone,
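[Editor's note] The new exclude_host/exclude_guest paragraph above completes the set of exclusion bits in struct perf_event_attr. A minimal user-space sketch of how these bits are used (not part of this patch; the event choice and error handling are illustrative only):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= 1,	/* count user mode only... */
		.exclude_hv	= 1,	/* ...not hypervisor mode... */
		.exclude_guest	= 1,	/* ...and only host-side counts */
	};
	uint64_t count = 0;
	/* pid = 0 (current task), cpu = -1 (any), no group, no flags */
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	if (fd < 0)
		return 1;
	/* ... run the workload to be measured here ... */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}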
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index 53c233370fae..2422894a8194 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -15,26 +15,17 @@
*/
#include <unistd.h>
+#include <linux/limits.h>
#include <pid_filter.h>
/* bpf-output associated map */
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall {
bool enabled;
};
-struct bpf_map SEC("maps") syscalls = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct syscall),
- .max_entries = 512,
-};
+bpf_map(syscalls, ARRAY, int, struct syscall, 512);
struct syscall_enter_args {
unsigned long long common_tp_fields;
@@ -51,32 +42,110 @@ struct syscall_exit_args {
struct augmented_filename {
unsigned int size;
int reserved;
- char value[256];
+ char value[PATH_MAX];
};
-#define SYS_OPEN 2
-#define SYS_ACCESS 21
-#define SYS_OPENAT 257
+/* syscalls where the first arg is a string */
+#define SYS_OPEN 2
+#define SYS_STAT 4
+#define SYS_LSTAT 6
+#define SYS_ACCESS 21
+#define SYS_EXECVE 59
+#define SYS_TRUNCATE 76
+#define SYS_CHDIR 80
+#define SYS_RENAME 82
+#define SYS_MKDIR 83
+#define SYS_RMDIR 84
+#define SYS_CREAT 85
+#define SYS_LINK 86
+#define SYS_UNLINK 87
+#define SYS_SYMLINK 88
+#define SYS_READLINK 89
+#define SYS_CHMOD 90
+#define SYS_CHOWN 92
+#define SYS_LCHOWN 94
+#define SYS_MKNOD 133
+#define SYS_STATFS 137
+#define SYS_PIVOT_ROOT 155
+#define SYS_CHROOT 161
+#define SYS_ACCT 163
+#define SYS_SWAPON 167
+#define SYS_SWAPOFF 168
+#define SYS_DELETE_MODULE 176
+#define SYS_SETXATTR 188
+#define SYS_LSETXATTR 189
+#define SYS_GETXATTR 191
+#define SYS_LGETXATTR 192
+#define SYS_LISTXATTR 194
+#define SYS_LLISTXATTR 195
+#define SYS_REMOVEXATTR 197
+#define SYS_LREMOVEXATTR 198
+#define SYS_MQ_OPEN 240
+#define SYS_MQ_UNLINK 241
+#define SYS_ADD_KEY 248
+#define SYS_REQUEST_KEY 249
+#define SYS_SYMLINKAT 266
+#define SYS_MEMFD_CREATE 319
+
+/* syscalls where the second arg is a string */
+
+#define SYS_PWRITE64 18
+#define SYS_EXECVE 59
+#define SYS_RENAME 82
+#define SYS_QUOTACTL 179
+#define SYS_FSETXATTR 190
+#define SYS_FGETXATTR 193
+#define SYS_FREMOVEXATTR 199
+#define SYS_MQ_TIMEDSEND 242
+#define SYS_REQUEST_KEY 249
+#define SYS_INOTIFY_ADD_WATCH 254
+#define SYS_OPENAT 257
+#define SYS_MKDIRAT 258
+#define SYS_MKNODAT 259
+#define SYS_FCHOWNAT 260
+#define SYS_FUTIMESAT 261
+#define SYS_NEWFSTATAT 262
+#define SYS_UNLINKAT 263
+#define SYS_RENAMEAT 264
+#define SYS_LINKAT 265
+#define SYS_READLINKAT 267
+#define SYS_FCHMODAT 268
+#define SYS_FACCESSAT 269
+#define SYS_UTIMENSAT 280
+#define SYS_NAME_TO_HANDLE_AT 303
+#define SYS_FINIT_MODULE 313
+#define SYS_RENAMEAT2 316
+#define SYS_EXECVEAT 322
+#define SYS_STATX 332
pid_filter(pids_filtered);
+struct augmented_args_filename {
+ struct syscall_enter_args args;
+ struct augmented_filename filename;
+};
+
+bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1);
+
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
- struct {
- struct syscall_enter_args args;
- struct augmented_filename filename;
- } augmented_args;
- struct syscall *syscall;
- unsigned int len = sizeof(augmented_args);
+ struct augmented_args_filename *augmented_args;
+ unsigned int len = sizeof(*augmented_args);
const void *filename_arg = NULL;
+ struct syscall *syscall;
+ int key = 0;
+
+ augmented_args = bpf_map_lookup_elem(&augmented_filename_map, &key);
+ if (augmented_args == NULL)
+ return 1;
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+ probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
- syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
+ syscall = bpf_map_lookup_elem(&syscalls, &augmented_args->args.syscall_nr);
if (syscall == NULL || !syscall->enabled)
return 0;
/*
@@ -119,30 +188,105 @@ int sys_enter(struct syscall_enter_args *args)
*
* after the ctx memory access to prevent their down stream merging.
*/
- switch (augmented_args.args.syscall_nr) {
+ /*
+ * This table of what args are strings will be provided by userspace,
+ * in the syscalls map, i.e. we will already have to do the lookup to
+ * see if this specific syscall is filtered, so we can as well get more
+ * info about what syscall args are strings or pointers, and how many
+ * bytes to copy, per arg, etc.
+ *
+ * For now hard code it, till we have all the basic mechanisms in place
+ * to automate everything and make the kernel part be completely driven
+ * by information obtained in userspace for each kernel version and
+ * processor architecture, making the kernel part the same no matter what
+ * kernel version or processor architecture it runs on.
+ */
+ switch (augmented_args->args.syscall_nr) {
+ case SYS_ACCT:
+ case SYS_ADD_KEY:
+ case SYS_CHDIR:
+ case SYS_CHMOD:
+ case SYS_CHOWN:
+ case SYS_CHROOT:
+ case SYS_CREAT:
+ case SYS_DELETE_MODULE:
+ case SYS_EXECVE:
+ case SYS_GETXATTR:
+ case SYS_LCHOWN:
+ case SYS_LGETXATTR:
+ case SYS_LINK:
+ case SYS_LISTXATTR:
+ case SYS_LLISTXATTR:
+ case SYS_LREMOVEXATTR:
+ case SYS_LSETXATTR:
+ case SYS_LSTAT:
+ case SYS_MEMFD_CREATE:
+ case SYS_MKDIR:
+ case SYS_MKNOD:
+ case SYS_MQ_OPEN:
+ case SYS_MQ_UNLINK:
+ case SYS_PIVOT_ROOT:
+ case SYS_READLINK:
+ case SYS_REMOVEXATTR:
+ case SYS_RENAME:
+ case SYS_REQUEST_KEY:
+ case SYS_RMDIR:
+ case SYS_SETXATTR:
+ case SYS_STAT:
+ case SYS_STATFS:
+ case SYS_SWAPOFF:
+ case SYS_SWAPON:
+ case SYS_SYMLINK:
+ case SYS_SYMLINKAT:
+ case SYS_TRUNCATE:
+ case SYS_UNLINK:
case SYS_ACCESS:
case SYS_OPEN: filename_arg = (const void *)args->args[0];
__asm__ __volatile__("": : :"memory");
break;
+ case SYS_EXECVEAT:
+ case SYS_FACCESSAT:
+ case SYS_FCHMODAT:
+ case SYS_FCHOWNAT:
+ case SYS_FGETXATTR:
+ case SYS_FINIT_MODULE:
+ case SYS_FREMOVEXATTR:
+ case SYS_FSETXATTR:
+ case SYS_FUTIMESAT:
+ case SYS_INOTIFY_ADD_WATCH:
+ case SYS_LINKAT:
+ case SYS_MKDIRAT:
+ case SYS_MKNODAT:
+ case SYS_MQ_TIMEDSEND:
+ case SYS_NAME_TO_HANDLE_AT:
+ case SYS_NEWFSTATAT:
+ case SYS_PWRITE64:
+ case SYS_QUOTACTL:
+ case SYS_READLINKAT:
+ case SYS_RENAMEAT:
+ case SYS_RENAMEAT2:
+ case SYS_STATX:
+ case SYS_UNLINKAT:
+ case SYS_UTIMENSAT:
case SYS_OPENAT: filename_arg = (const void *)args->args[1];
break;
}
if (filename_arg != NULL) {
- augmented_args.filename.reserved = 0;
- augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
- sizeof(augmented_args.filename.value),
+ augmented_args->filename.reserved = 0;
+ augmented_args->filename.size = probe_read_str(&augmented_args->filename.value,
+ sizeof(augmented_args->filename.value),
filename_arg);
- if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
- len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
- len &= sizeof(augmented_args.filename.value) - 1;
+ if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) {
+ len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size;
+ len &= sizeof(augmented_args->filename.value) - 1;
}
} else {
- len = sizeof(augmented_args.args);
+ len = sizeof(augmented_args->args);
}
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
- return 0;
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len);
}
SEC("raw_syscalls:sys_exit")
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
index 2ae44813ef2d..524fdb8534b3 100644
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_syscalls.c
@@ -19,12 +19,8 @@
#include <stdio.h>
#include <linux/socket.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct syscall_exit_args {
unsigned long long common_tp_fields;
@@ -55,9 +51,9 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
len &= sizeof(augmented_args.filename.value) - 1; \
} \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, len); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, len); \
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
@@ -125,10 +121,10 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
/* addrlen = augmented_args.args.addrlen; */ \
/* */ \
probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
} \
int syscall_exit(syscall)(struct syscall_exit_args *args) \
{ \
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
index b59e8812ee8c..e81b535346c0 100644
--- a/tools/perf/examples/bpf/etcsnoop.c
+++ b/tools/perf/examples/bpf/etcsnoop.c
@@ -21,12 +21,8 @@
#include <stdio.h>
-struct bpf_map SEC("maps") __augmented_syscalls__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
+/* bpf-output associated map */
+bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
struct augmented_filename {
int size;
@@ -49,11 +45,11 @@ int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
args->filename_ptr); \
if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
return 0; \
- perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
- augmented_args.filename.size)); \
- return 0; \
+ /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
+ return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
+ &augmented_args, \
+ (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
+ augmented_args.filename.size)); \
}
struct syscall_enter_openat_args {
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index e667577207dc..2eac6d804b2d 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,20 @@ struct bpf_map {
unsigned int numa_node;
};
+#define bpf_map(name, _type, type_key, type_val, _max_entries) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_##_type, \
+ .key_size = sizeof(type_key), \
+ .value_size = sizeof(type_val), \
+ .max_entries = _max_entries, \
+}; \
+struct ____btf_map_##name { \
+ type_key key; \
+ type_val value; \
+}; \
+struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
+ ____btf_map_##name = { }
+
/*
* FIXME: this should receive .max_entries as a parameter, as careful
* tuning of these limits is needed to avoid hitting limits that
@@ -26,13 +40,7 @@ struct bpf_map {
* For the current need, 'perf trace --filter-pids', 64 should
* be good enough, but this surely needs to be revisited.
*/
-#define pid_map(name, value_type) \
-struct bpf_map SEC("maps") name = { \
- .type = BPF_MAP_TYPE_HASH, \
- .key_size = sizeof(pid_t), \
- .value_size = sizeof(value_type), \
- .max_entries = 64, \
-}
+#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
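[Editor's note] Given the FIXME above about the fixed 64-entry limit, a short usage sketch of the rewritten pid_map() wrapper, built only on the two helpers declared just above (the map name and the BPF_ANY flag value of 0 are assumptions for illustration):

pid_map(pids_marked, bool);		/* HASH of pid_t -> bool, 64 entries */

static int pid_mark(pid_t pid)
{
	bool flag = true;

	return bpf_map_update_elem(&pids_marked, &pid, &flag, 0 /* BPF_ANY */);
}

static bool pid_is_marked(pid_t pid)
{
	bool *flag = bpf_map_lookup_elem(&pids_marked, &pid);

	return flag && *flag;
}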
diff --git a/tools/perf/perf-with-kcore.sh b/tools/perf/perf-with-kcore.sh
index 7e47a7cbc195..74e4627ca278 100644
--- a/tools/perf/perf-with-kcore.sh
+++ b/tools/perf/perf-with-kcore.sh
@@ -1,15 +1,8 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
# perf-with-kcore: use perf with a copy of kcore
# Copyright (c) 2014, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
set -e
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a11cb006f968..72df4b6fa36f 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
use_pager = 1;
commit_pager_choice();
+ perf_env__init(&perf_env);
perf_env__set_cmdline(&perf_env, argc, argv);
status = p->fn(argc, argv);
perf_config__exit();
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..d59dee61b64d 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
bool ignore_missing_thread;
bool strict_freq;
bool sample_id;
+ bool no_bpf_event;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
@@ -83,6 +84,16 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+ int mmap_flush;
+ unsigned int comp_level;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_MAX
};
struct option;
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
new file mode 100644
index 000000000000..0ac9b7927450
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
@@ -0,0 +1,179 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL",
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH",
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC",
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+ },
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 59cd8604b0bd..927fcddcb4aa 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -12,7 +12,10 @@
#
#
#Family-model,Version,Filename,EventType
-0x00000000410fd03[[:xdigit:]],v1,arm/cortex-a53,core
+0x00000000410fd030,v1,arm/cortex-a53,core
+0x00000000420f1000,v1,arm/cortex-a53,core
+0x00000000410fd070,v1,arm/cortex-a57-a72,core
+0x00000000410fd080,v1,arm/cortex-a57-a72,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
0x00000000480fd010,v1,hisilicon/hip08,core
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
new file mode 100644
index 000000000000..bffb2d4a6420
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -0,0 +1,2245 @@
+[
+ {
+ "BriefDescription": "% of finished branches that were treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8_CONV / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "% of finished branches that were pairable but not treated as BC+8",
+ "MetricExpr": "PM_BR_BC_8 / PM_BRU_FIN * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "bc_8_not_converted_branch_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percent of mispredicted branches out of all predicted (correctly and incorrectly) branches that completed",
+ "MetricExpr": "PM_BR_MPRED_CMPL / (PM_BR_PRED_BR0 + PM_BR_PRED_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% of Branch miss predictions per instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "branch_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+        "BriefDescription": "Percent of count cache mispredictions out of all completed branches that required count cache prediction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "CR MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_CR / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "cr_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of link stack mispredictions out of all completed branches that required link stack prediction",
+ "MetricExpr": "(PM_BR_MPRED_TA - PM_BR_MPRED_CCACHE) / (PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "TA MisPredictions per Instruction",
+ "MetricExpr": "PM_BR_MPRED_TA / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of target address mispredictions out of all completed branches that required address prediction",
+ "MetricExpr": "PM_BR_MPRED_TA / (PM_BR_PRED_CCACHE_BR0 + PM_BR_PRED_CCACHE_BR1 + PM_BR_PRED_LSTACK_BR0 + PM_BR_PRED_LSTACK_BR1) * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ta_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Percent of branches completed that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BR_CMPL",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip+group+sys pumps that were incorrectly predicted",
+ "MetricExpr": "PM_PUMP_MPRED * 100 / (PM_PUMP_CPRED + PM_PUMP_MPRED)",
+ "MetricGroup": "bus_stats",
+ "MetricName": "any_pump_mpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of chip pumps that were correctly predicted as chip pumps the first time",
+ "MetricExpr": "PM_CHIP_PUMP_CPRED * 100 / PM_L2_CHIP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "chip_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Percent of group pumps that were correctly predicted as group pumps the first time",
+ "MetricExpr": "PM_GRP_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "group_pump_cpred_percent"
+ },
+ {
+        "BriefDescription": "Percent of system pumps that were correctly predicted as system pumps the first time",
+ "MetricExpr": "PM_SYS_PUMP_CPRED * 100 / PM_L2_GROUP_PUMP",
+ "MetricGroup": "bus_stats",
+ "MetricName": "sys_pump_cpred_percent"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU or BRU operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to ISU Branch Operations",
+ "MetricExpr": "PM_CMPLU_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which a Group Completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by CO queue full",
+ "MetricExpr": "PM_CMPLU_STALL_COQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "coq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to CRU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_BRU_CRU - PM_CMPLU_STALL_BRU) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "cru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by flushes",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU Multi-Cycle Instructions",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_multi_cyc_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by FXU",
+ "MetricExpr": "PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Other cycles stalled by FXU",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXLONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty due to Branch Mispredicts and Icache Misses",
+ "MetricExpr": "PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_br_mpred_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held",
+        "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to issue queue",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to maps",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_map_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to syncs and other effects",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles GCT empty where dispatch was held due to SRQ",
+ "MetricExpr": "PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_disp_held_srq_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve in the local L2 or L3",
+ "MetricExpr": "(PM_GCT_NOSLOT_IC_MISS - PM_GCT_NOSLOT_IC_L3MISS) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by GCT empty due to Icache misses that resolve off-chip",
+ "MetricExpr": "PM_GCT_NOSLOT_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_ic_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Other GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_IC_MISS / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_BR_MPRED_ICMISS / PM_RUN_INST_CMPL) - ((PM_GCT_NOSLOT_DISP_HELD_MAP / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_SRQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_ISSQ / PM_RUN_INST_CMPL) + (PM_GCT_NOSLOT_DISP_HELD_OTHER / PM_RUN_INST_CMPL))",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "gct_empty_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by heavyweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "hwsync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU",
+ "MetricExpr": "PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in distant interventions and memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_REMOTE) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_distant_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote or distant caches",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l21l31_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_conflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in the local L2 or L3, where there was no conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l2l3_noconflict_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in other core's caches or memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in local memory or local L4",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_lmem_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses that resolved in remote interventions and memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_dcache_miss_remote_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by ERAT Translation rejects",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_erat_miss_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU load finishes",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_ld_fin_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LHS rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lhs_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LMQ Full rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_lmq_full_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LOAD_FINISH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Rejects",
+ "MetricExpr": "PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Rejects",
+ "MetricExpr": "(PM_CMPLU_STALL_REJECT / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJECT_LHS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_ERAT_MISS / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_REJ_LMQ_FULL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_reject_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU store forwarding",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_st_fwd_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by LSU Stores",
+ "MetricExpr": "PM_CMPLU_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_store_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by lightweight syncs",
+ "MetricExpr": "PM_CMPLU_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lwsync_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_MEM_ECC_DELAY / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mem_ecc_delay_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by nops (nothing next to finish)",
+ "MetricExpr": "PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "no_ntf_stall_cpi"
+ },
+ {
+ "MetricExpr": "PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_all_fin_cpi"
+ },
+ {
+ "MetricExpr": "PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntcg_flush_cpi"
+ },
+ {
+ "BriefDescription": "Other thread block stall cycles",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_LWSYNC - PM_CMPLU_STALL_HWSYNC - PM_CMPLU_STALL_MEM_ECC_DELAY - PM_CMPLU_STALL_FLUSH - PM_CMPLU_STALL_COQ_FULL) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for",
+ "MetricExpr": "(PM_RUN_CYC / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_GCT_NOSLOT_CYC / PM_RUN_INST_CMPL) - (PM_NTCG_ALL_FIN / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL) - (PM_GRP_CMPL / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Stall cycles unaccounted for",
+ "MetricExpr": "(PM_CMPLU_STALL / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_BRU_CRU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_FXU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_LSU / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NTCG_FLUSH / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_NO_NTF / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stall Cycles",
+ "MetricExpr": "PM_CMPLU_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles a thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU",
+ "MetricExpr": "PM_CMPLU_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VSU - PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_SCALAR) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Scalar Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other VSU Scalar Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_SCALAR / PM_RUN_INST_CMPL) - (PM_CMPLU_STALL_SCALAR_LONG / PM_RUN_INST_CMPL)",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_scalar_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by VSU Vector Long Operations",
+ "MetricExpr": "PM_CMPLU_STALL_VECTOR_LONG / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_long_cpi"
+ },
+ {
+ "BriefDescription": "Cycles stalled by other VSU Vector Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_VECTOR - PM_CMPLU_STALL_VECTOR_LONG) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vsu_stall_vector_other_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_lhs_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 load hits per instruction where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_other_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+        "BriefDescription": "% of DL1 reloads from Private L3 S state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_no_conflict_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_lhs_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with no conflicts",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L2 with some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_other_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_conflict_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L3 load hits per instruction where the line was brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 without conflicts",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_no_conflict_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "dL1 miss portion of CPI",
+ "MetricExpr": "( (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)/ (PM_RUN_CYC / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dcache_miss_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_DMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L21_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L2 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL) ) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L31_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_L3 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local L4 miss rates with measured LL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "ll4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_LMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_MOD / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL2L3_SHR / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RL4 / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a %of dcache miss cpi",
+ "MetricExpr": "(((PM_DATA_FROM_RMEM / PM_RUN_INST_CMPL) * (PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM)) / (PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL)) *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_BR_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "Percentage Cycles a group completed",
+ "MetricExpr": "PM_GRP_CMPL / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_completed_percent"
+ },
+ {
+ "BriefDescription": "Percentage Cycles a group dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricGroup": "general",
+ "MetricName": "cyc_grp_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cyc_per_group"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_11to14_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_15to17_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_18plus_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_1to2_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_3to6_slots_percent"
+ },
+ {
+ "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had atleast 1 slot valid",
+ "MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "gct_util_7to10_slots_percent"
+ },
+ {
+ "BriefDescription": "Avg. group size",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "group_size"
+ },
+ {
+ "BriefDescription": "Instructions per group",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "inst_per_group"
+ },
+ {
+ "BriefDescription": "Instructions per cycles",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 dmand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Run cycles per cycle",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT2 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT2_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt2_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT4 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT4_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt4_cycles_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in SMT8 Mode",
+ "MetricExpr": "(PM_RUN_CYC_SMT8_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "smt8_cycles_percent"
+ },
+ {
+ "BriefDescription": "IPC of all instructions completed by the core while this thread was stalled",
+ "MetricExpr": "PM_CMPLU_STALL_OTHER_CMPL/PM_RUN_CYC",
+ "MetricGroup": "general",
+ "MetricName": "smt_benefit"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in Single Thread Mode",
+ "MetricExpr": "(PM_RUN_CYC_ST_MODE/PM_RUN_CYC) * 100",
+ "MetricGroup": "general",
+ "MetricName": "st_cycles_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "(PM_FXU0_FIN + PM_FXU1_FIN)/PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "FXU0 balance",
+ "MetricExpr": "PM_FXU0_FIN / (PM_FXU0_FIN + PM_FXU1_FIN)",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_balance"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU0 is in use",
+ "MetricExpr": "PM_FXU0_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_fin"
+ },
+ {
+ "BriefDescription": "FXU0 only Busy",
+ "MetricExpr": "PM_FXU0_BUSY_FXU1_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu0_only_busy"
+ },
+ {
+ "BriefDescription": "Fraction of cycles that FXU1 is in use",
+ "MetricExpr": "PM_FXU1_FIN / PM_RUN_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_fin"
+ },
+ {
+ "BriefDescription": "FXU1 only Busy",
+ "MetricExpr": "PM_FXU1_BUSY_FXU0_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu1_only_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_busy"
+ },
+ {
+ "BriefDescription": "Both FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "fxu_both_idle"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_mix",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "Icache Fetchs per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "Average number of stores that gather in the store buffer before being sent to an L2 RC machine",
+ "MetricExpr": "PM_ST_CMPL / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "avg_stores_gathered"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L2 store misses per drained store. A drained store may contain multiple individual stores if they target the same line",
+ "MetricExpr": "PM_L2_ST_MISS / (PM_L2_ST / 2)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_store_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "average_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "(PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ)",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "average service time for SYNC",
+ "MetricExpr": "PM_LSU_SRQ_SYNC_CYC / PM_LSU_SRQ_SYNC",
+ "MetricGroup": "latency",
+ "MetricName": "average_sync_cyc"
+ },
+ {
+ "BriefDescription": "Cycles LMQ slot0 was active on an average",
+ "MetricExpr": "PM_LSU_LMQ_S0_VALID / PM_LSU_LMQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lmq_life_time"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S0_VALID / PM_LSU_LRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles LRQ stays active for one load. Slot 43 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_LRQ_S43_VALID / PM_LSU_LRQ_S43_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_lrq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Average number of cycles SRQ stays active for one load. Slot 0 is VALID ONLY FOR EVEN THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S0_VALID / PM_LSU_SRQ_S0_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_even"
+ },
+ {
+ "BriefDescription": "Average number of cycles SRQ stays active for one load. Slot 39 is valid ONLY FOR ODD THREADS",
+ "MetricExpr": "PM_LSU_SRQ_S39_VALID / PM_LSU_SRQ_S39_ALLOC",
+ "MetricGroup": "latency",
+ "MetricName": "avg_srq_life_time_odd"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricGroup": "latency",
+ "MetricName": "bkill_latency"
+ },
+ {
+ "BriefDescription": "Marked dclaim latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_DCLAIM_CYC / PM_MRK_FAB_RSP_DCLAIM",
+ "MetricGroup": "latency",
+ "MetricName": "dclaim_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "estimated exposed miss latency for dL1 misses, ie load miss when we were NTC",
+ "MetricExpr": "PM_MRK_LD_MISS_EXPOSED_CYC / PM_MRK_LD_MISS_EXPOSED",
+ "MetricGroup": "latency",
+ "MetricName": "exposed_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the M state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from L2.1 in the S state",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time due to load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_ldhitst_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered a conflict at RC machine dispatch time NOT due load-hit-store",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC/ PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "MetricGroup": "latency",
+ "MetricName": "l2_disp_conflict_other_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that were satisfied by lines prefetched into the L3. This information is forwarded from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_MEPF_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_mepf_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L2 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and beyond",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2MISS_CYC/ PM_MRK_DATA_FROM_L2MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l2miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that came from the L3 and suffered no conflicts",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l3_no_conflict_latency"
+ },
+ {
+ "BriefDescription": "Average load latency for all marked demand loads that come from beyond the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3MISS_CYC/ PM_MRK_DATA_FROM_L3MISS",
+ "MetricGroup": "latency",
+ "MetricName": "l3miss_latency"
+ },
+ {
+ "BriefDescription": "Average latency for marked reloads that hit in the L3 on the MEPF state. i.e. lines that were prefetched into the L3",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_MEPF_CYC/ PM_MRK_DATA_FROM_L3_MEPF",
+ "MetricGroup": "latency",
+ "MetricName": "l3pref_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on a different chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "off_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Latency for marked reloads that hit in the L2 or L3 of any other core on the same chip",
+ "MetricExpr": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC/ PM_MRK_DATA_FROM_ON_CHIP_CACHE",
+ "MetricGroup": "latency",
+ "MetricName": "on_chip_cache_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LSU reject ratio",
+ "MetricExpr": "PM_LSU_REJECT *100/ (PM_LSU_FIN - PM_LSU_FX_FIN)",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lsu_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "(PM_DATA_FROM_LL4 + PM_DATA_FROM_LMEM) * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4 + PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4 + PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 16G page per inst",
+ "MetricExpr": "100 * PM_DERAT_MISS_16G / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16G page",
+ "MetricExpr": "PM_DERAT_MISS_16G / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16g_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 16M page per inst",
+ "MetricExpr": "PM_DERAT_MISS_16M * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_16m_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss ratio for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB_Miss_Rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits on any Centaur (local, remote, or distant) on either L4 or DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_MEMORY / PM_LD_REF_L1",
+ "MetricName": "any_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Base Completion Cycles",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL",
+ "MetricName": "base_completion_cpi"
+ },
+ {
+ "BriefDescription": "Marked background kill latency, measured in L2",
+ "MetricExpr": "PM_MRK_FAB_RSP_BKILL_CYC / PM_MRK_FAB_RSP_BKILL",
+ "MetricName": "bkill_ratio_percent"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_DMEM + PM_DATA_FROM_DL4) / PM_LD_REF_L1",
+ "MetricName": "distant_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads that came from the L3 and beyond",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of DL1 reloads from L3 where the lines were brought into the L3 by a prefetch operation",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "dl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a distant chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "dl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DL4 / PM_LD_REF_L1",
+ "MetricName": "dl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a distant Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_DMEM / PM_LD_REF_L1",
+ "MetricName": "dmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Overhead of expansion cycles",
+ "MetricExpr": "(PM_GRP_CMPL / PM_RUN_INST_CMPL) - (PM_1PLUS_PPC_CMPL / PM_RUN_INST_CMPL)",
+ "MetricName": "expansion_overhead_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations executded in the Load/Store Unit following a load/store operation",
+ "MetricExpr": "PM_LSU_FX_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_in_lsu_per_inst"
+ },
+ {
+ "BriefDescription": "GCT empty cycles",
+ "MetricExpr": "(PM_GCT_NOSLOT_CYC / PM_RUN_CYC) * 100",
+ "MetricName": "gct_empty_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of L1 hits per load ref",
+ "MetricExpr": "(PM_LD_REF_L1 - PM_LD_MISS_L1) / PM_LD_REF_L1",
+ "MetricName": "l1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1",
+ "MetricName": "l1_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L21_MOD + PM_DATA_FROM_L21_SHR) / PM_LD_REF_L1",
+ "MetricName": "l2_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD / PM_LD_REF_L1",
+ "MetricName": "l2_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L2 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR / PM_LD_REF_L1",
+ "MetricName": "l2_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_CO_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2 / PM_LD_REF_L1",
+ "MetricName": "l2_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_REF_L1",
+ "MetricName": "l2_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced a Load-Hit-Store conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST / PM_LD_REF_L1",
+ "MetricName": "l2_lhs_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L2_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l2_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L2 load hits per L1 load ref where the L2 experienced some conflict other than Load-Hit-Store",
+ "MetricExpr": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER / PM_LD_REF_L1",
+ "MetricName": "l2_other_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_RC_USAGE / PM_RUN_CYC) * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "(PM_SN_USAGE / PM_RUN_CYC) * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) / PM_LD_REF_L1",
+ "MetricName": "l3_1_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD / PM_LD_REF_L1",
+ "MetricName": "l3_1_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on another core's L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR / PM_LD_REF_L1",
+ "MetricName": "l3_1_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the demand load collided with a pending prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_DISP_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3 / PM_LD_REF_L1",
+ "MetricName": "l3_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load misses per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_REF_L1",
+ "MetricName": "l3_ld_miss_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 load hits per load ref where the L3 did not experience a conflict",
+ "MetricExpr": "PM_DATA_FROM_L3_NO_CONFLICT / PM_LD_REF_L1",
+ "MetricName": "l3_no_conflict_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were not in the MEPF state per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_L3 - PM_DATA_FROM_L3_MEPF) / PM_LD_REF_L1",
+ "MetricName": "l3other_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of L3 hits on lines that were recently prefetched into the L3 (MEPF state) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF / PM_LD_REF_L1",
+ "MetricName": "l3pref_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_LD_REF_L1",
+ "MetricName": "ll4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_LD_REF_L1",
+ "MetricName": "lmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a local Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_LMEM + PM_DATA_FROM_LL4) / PM_LD_REF_L1",
+ "MetricName": "local_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Cycles stalled by Other LSU Operations",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_REJECT - PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_STORE) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "lsu_stall_avg_cyc_per_l1hit_stfw"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on a different chip (remote or distant) per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_OFF_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "off_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on another core's L2 or L3 on the same chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_ON_CHIP_CACHE / PM_LD_REF_L1",
+ "MetricName": "on_chip_cache_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote chip's Centaur (L4 or DRAM) per L1 load ref",
+ "MetricExpr": "(PM_DATA_FROM_RMEM + PM_DATA_FROM_RL4) / PM_LD_REF_L1",
+ "MetricName": "remote_centaur_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable resources or facilities",
+ "MetricExpr": "PM_ISU_REJECT_RES_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "resource_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the M (exclusive) state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD / PM_LD_REF_L1",
+ "MetricName": "rl2l3_mod_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits of a line in the S state on the L2 or L3 of a core on a remote chip per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR / PM_LD_REF_L1",
+ "MetricName": "rl2l3_shr_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's cache per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RL4 / PM_LD_REF_L1",
+ "MetricName": "rl4_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Fraction of hits on a remote Centaur's DRAM per L1 load ref",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_LD_REF_L1",
+ "MetricName": "rmem_ld_hit_ratio"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected due to SAR Bypass",
+ "MetricExpr": "PM_ISU_REJECT_SAR_BYPASS *100/ PM_RUN_INST_CMPL",
+ "MetricName": "sar_bypass_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Percent of all FXU/VSU instructions that got rejected because of unavailable sources",
+ "MetricExpr": "PM_ISU_REJECT_SRC_NA *100/ PM_RUN_INST_CMPL",
+ "MetricName": "source_na_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / PM_RUN_INST_CMPL",
+ "MetricName": "store_forward_rate_percent"
+ },
+ {
+ "BriefDescription": "Store forward rate",
+ "MetricExpr": "100 * (PM_LSU0_SRQ_STFWD + PM_LSU1_SRQ_STFWD) / (PM_LD_REF_L1 - PM_LD_MISS_L1)",
+ "MetricName": "store_forward_ratio_percent"
+ },
+ {
+ "BriefDescription": "Marked store latency, from core completion to L2 RC machine completion",
+ "MetricExpr": "(PM_MRK_ST_L2DISP_TO_CMPL_CYC + PM_MRK_ST_DRAIN_TO_L2DISP_CYC) / PM_MRK_ST_NEST",
+ "MetricName": "store_latency"
+ },
+ {
+ "BriefDescription": "Cycles stalled by any sync",
+ "MetricExpr": "(PM_CMPLU_STALL_LWSYNC + PM_CMPLU_STALL_HWSYNC) / PM_RUN_INST_CMPL",
+ "MetricName": "sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Percentage of lines that were prefetched into the L3 and evicted before they were consumed",
+ "MetricExpr": "(PM_L3_CO_MEPF / 2) / PM_L3_PREF_ALL * 100",
+ "MetricName": "wasted_l3_prefetch_percent"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 704302c3e67d..9dc2f6b70354 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -348,18 +348,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x517082",
- "EventName": "PM_CO_DISP_FAIL",
- "BriefDescription": "CO dispatch failed due to all CO machines being busy",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x527084",
- "EventName": "PM_CO_TM_SC_FOOTPRINT",
- "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3608a",
"EventName": "PM_CO_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -1578,36 +1566,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x617082",
- "EventName": "PM_ISIDE_DISP",
- "BriefDescription": "All i-side dispatch attempts",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627084",
- "EventName": "PM_ISIDE_DISP_FAIL",
- "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x627086",
- "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
- "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608e",
"EventName": "PM_ISIDE_L2MEMACC",
"BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
"PublicDescription": ""
},
{,
- "EventCode": "0x44608e",
- "EventName": "PM_ISIDE_MRU_TOUCH",
- "BriefDescription": "Iside L2 MRU touch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x30ac",
"EventName": "PM_ISU_REF_FX0",
"BriefDescription": "FX0 ISU reject",
@@ -1734,222 +1698,36 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x417080",
- "EventName": "PM_L2_CASTOUT_MOD",
- "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x417082",
- "EventName": "PM_L2_CASTOUT_SHR",
- "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27084",
"EventName": "PM_L2_CHIP_PUMP",
"BriefDescription": "RC requests that were local on chip pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x427086",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "Dcache invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44608c",
- "EventName": "PM_L2_DISP_ALL_L2MISS",
- "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x27086",
"EventName": "PM_L2_GROUP_PUMP",
"BriefDescription": "RC requests that were on Node Pump attempts",
"PublicDescription": ""
},
{,
- "EventCode": "0x626084",
- "EventName": "PM_L2_GRP_GUESS_CORRECT",
- "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x626086",
- "EventName": "PM_L2_GRP_GUESS_WRONG",
- "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x427084",
- "EventName": "PM_L2_IC_INV",
- "BriefDescription": "Icache Invalidates from L2",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x436088",
- "EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43608a",
- "EventName": "PM_L2_INST_MISS",
- "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x416080",
- "EventName": "PM_L2_LD",
- "BriefDescription": "All successful D-side Load dispatches for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x437088",
- "EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful load dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x43708a",
- "EventName": "PM_L2_LD_HIT",
- "BriefDescription": "All successful load dispatches that were L2 hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x426084",
- "EventName": "PM_L2_LD_MISS",
- "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616080",
- "EventName": "PM_L2_LOC_GUESS_CORRECT",
- "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x616082",
- "EventName": "PM_L2_LOC_GUESS_WRONG",
- "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516080",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "L2 RC load dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x516082",
- "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x526084",
- "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x536088",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "L2 RC store dispatch attempt",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53608a",
- "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
- "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54608c",
- "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
- "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x537088",
- "EventName": "PM_L2_RC_ST_DONE",
- "BriefDescription": "RC did st to line that was Tx or Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63708a",
- "EventName": "PM_L2_RTY_LD",
- "BriefDescription": "RC retries on PB for any load from core",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3708a",
"EventName": "PM_L2_RTY_ST",
"BriefDescription": "RC retries on PB for any store from core",
"PublicDescription": ""
},
{,
- "EventCode": "0x54708c",
- "EventName": "PM_L2_SN_M_RD_DONE",
- "BriefDescription": "SNP dispatched for a read and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54708e",
- "EventName": "PM_L2_SN_M_WR_DONE",
- "BriefDescription": "SNP dispatched for a write and was M",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53708a",
- "EventName": "PM_L2_SN_SX_I_DONE",
- "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17080",
"EventName": "PM_L2_ST",
"BriefDescription": "All successful D-side store dispatches for this thread",
"PublicDescription": ""
},
{,
- "EventCode": "0x44708c",
- "EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful store dispatches",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x44708e",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful store dispatches that were L2Hits",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x17082",
"EventName": "PM_L2_ST_MISS",
"BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
"PublicDescription": ""
},
{,
- "EventCode": "0x636088",
- "EventName": "PM_L2_SYS_GUESS_CORRECT",
- "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x63608a",
- "EventName": "PM_L2_SYS_GUESS_WRONG",
- "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x617080",
- "EventName": "PM_L2_SYS_PUMP",
- "BriefDescription": "RC requests that were system pump attempts",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e05e",
"EventName": "PM_L2_TM_REQ_ABORT",
"BriefDescription": "TM abort",
@@ -1962,36 +1740,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x23808a",
- "EventName": "PM_L3_CINJ",
- "BriefDescription": "l3 ci of cache inject",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128084",
- "EventName": "PM_L3_CI_HIT",
- "BriefDescription": "L3 Castins Hit (total count",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x128086",
- "EventName": "PM_L3_CI_MISS",
- "BriefDescription": "L3 castins miss (total count",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x819082",
"EventName": "PM_L3_CI_USAGE",
"BriefDescription": "rotating sample of 16 CI or CO actives",
"PublicDescription": ""
},
{,
- "EventCode": "0x438088",
- "EventName": "PM_L3_CO",
- "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x83908b",
"EventName": "PM_L3_CO0_ALLOC",
"BriefDescription": "lifetime, sample of CO machine 0 valid",
@@ -2010,120 +1764,18 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x238088",
- "EventName": "PM_L3_CO_LCO",
- "BriefDescription": "Total L3 castouts occurred on LCO",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x28084",
"EventName": "PM_L3_CO_MEM",
"BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19082",
- "EventName": "PM_L3_GRP_GUESS_CORRECT",
- "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb3908a",
- "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
- "BriefDescription": "Initial scope=group but data from local node. Predition too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb39088",
- "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
- "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218080",
- "EventName": "PM_L3_HIT",
- "BriefDescription": "L3 Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x138088",
- "EventName": "PM_L3_L2_CO_HIT",
- "BriefDescription": "L2 castout hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x13808a",
- "EventName": "PM_L3_L2_CO_MISS",
- "BriefDescription": "L2 castout miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808c",
- "EventName": "PM_L3_LAT_CI_HIT",
- "BriefDescription": "L3 Lateral Castins Hit",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x14808e",
- "EventName": "PM_L3_LAT_CI_MISS",
- "BriefDescription": "L3 Lateral Castins Miss",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228084",
- "EventName": "PM_L3_LD_HIT",
- "BriefDescription": "L3 demand LD Hits",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x228086",
- "EventName": "PM_L3_LD_MISS",
- "BriefDescription": "L3 demand LD Miss",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x1e052",
"EventName": "PM_L3_LD_PREF",
"BriefDescription": "L3 Load Prefetches",
"PublicDescription": ""
},
{,
- "EventCode": "0xb19080",
- "EventName": "PM_L3_LOC_GUESS_CORRECT",
- "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb29086",
- "EventName": "PM_L3_LOC_GUESS_WRONG",
- "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x218082",
- "EventName": "PM_L3_MISS",
- "BriefDescription": "L3 Misses",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808c",
- "EventName": "PM_L3_P0_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x538088",
- "EventName": "PM_L3_P0_CO_MEM",
- "BriefDescription": "l3 CO to memory port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929084",
- "EventName": "PM_L3_P0_CO_RTY",
- "BriefDescription": "L3 CO received retry port 0",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xa29084",
"EventName": "PM_L3_P0_GRP_PUMP",
"BriefDescription": "L3 pf sent with grp scope port 0",
@@ -2148,120 +1800,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xa19080",
- "EventName": "PM_L3_P0_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919080",
- "EventName": "PM_L3_P0_PF_RTY",
- "BriefDescription": "L3 PF received retry port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x939088",
- "EventName": "PM_L3_P0_SN_HIT",
- "BriefDescription": "L3 snoop hit port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118080",
- "EventName": "PM_L3_P0_SN_INV",
- "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908c",
- "EventName": "PM_L3_P0_SN_MISS",
- "BriefDescription": "L3 snoop miss port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa39088",
- "EventName": "PM_L3_P0_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 0",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x54808e",
- "EventName": "PM_L3_P1_CO_L31",
- "BriefDescription": "l3 CO to L3.1 (lco) port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x53808a",
- "EventName": "PM_L3_P1_CO_MEM",
- "BriefDescription": "l3 CO to memory port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x929086",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa29086",
- "EventName": "PM_L3_P1_GRP_PUMP",
- "BriefDescription": "L3 pf sent with grp scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x528086",
- "EventName": "PM_L3_P1_LCO_DATA",
- "BriefDescription": "lco sent with data port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x518082",
- "EventName": "PM_L3_P1_LCO_NO_DATA",
- "BriefDescription": "dataless l3 lco sent port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa4908e",
- "EventName": "PM_L3_P1_LCO_RTY",
- "BriefDescription": "L3 LCO received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa19082",
- "EventName": "PM_L3_P1_NODE_PUMP",
- "BriefDescription": "L3 pf sent with nodal scope port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x919082",
- "EventName": "PM_L3_P1_PF_RTY",
- "BriefDescription": "L3 PF received retry port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x93908a",
- "EventName": "PM_L3_P1_SN_HIT",
- "BriefDescription": "L3 snoop hit port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x118082",
- "EventName": "PM_L3_P1_SN_INV",
- "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x94908e",
- "EventName": "PM_L3_P1_SN_MISS",
- "BriefDescription": "L3 snoop miss port 1",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xa3908a",
- "EventName": "PM_L3_P1_SYS_PUMP",
- "BriefDescription": "L3 pf sent with sys scope port 1",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x84908d",
"EventName": "PM_L3_PF0_ALLOC",
"BriefDescription": "lifetime, sample of PF machine 0 valid",
@@ -2274,12 +1812,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x428084",
- "EventName": "PM_L3_PF_HIT_L3",
- "BriefDescription": "l3 pf hit in l3",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18080",
"EventName": "PM_L3_PF_MISS_L3",
"BriefDescription": "L3 Prefetch missed in L3",
@@ -2370,42 +1902,12 @@
"PublicDescription": ""
},
{,
- "EventCode": "0xb29084",
- "EventName": "PM_L3_SYS_GUESS_CORRECT",
- "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0xb4908c",
- "EventName": "PM_L3_SYS_GUESS_WRONG",
- "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x24808e",
- "EventName": "PM_L3_TRANS_PF",
- "BriefDescription": "L3 Transient prefetch",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x18081",
"EventName": "PM_L3_WI0_ALLOC",
"BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
"PublicDescription": "0.0"
},
{,
- "EventCode": "0x418080",
- "EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x418082",
- "EventName": "PM_L3_WI_USAGE",
- "BriefDescription": "rotating sample of 8 WI actives",
- "PublicDescription": ""
- },
- {,
"EventCode": "0xc080",
"EventName": "PM_LD_REF_L1_LSU0",
"BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
@@ -3312,12 +2814,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x328084",
- "EventName": "PM_NON_TM_RST_SC",
- "BriefDescription": "non tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x2001a",
"EventName": "PM_NTCG_ALL_FIN",
"BriefDescription": "Cycles after all instructions have finished to group completed",
@@ -3420,24 +2916,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x34808e",
- "EventName": "PM_RD_CLEARING_SC",
- "BriefDescription": "rd clearing sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x34808c",
- "EventName": "PM_RD_FORMING_SC",
- "BriefDescription": "rd forming sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x428086",
- "EventName": "PM_RD_HIT_PF",
- "BriefDescription": "rd machine hit l3 pf machine",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20004",
"EventName": "PM_REAL_SRQ_FULL",
"BriefDescription": "Out of real srq entries",
@@ -3504,18 +2982,6 @@
"PublicDescription": "TLBIE snoopSnoop TLBIE"
},
{,
- "EventCode": "0x338088",
- "EventName": "PM_SNP_TM_HIT_M",
- "BriefDescription": "snp tm st hit m mu",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x33808a",
- "EventName": "PM_SNP_TM_HIT_T",
- "BriefDescription": "snp tm_st_hit t tn te",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x4608c",
"EventName": "PM_SN_USAGE",
"BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -3534,12 +3000,6 @@
"PublicDescription": "STCX executed reported at sent to nest42"
},
{,
- "EventCode": "0x717080",
- "EventName": "PM_ST_CAUSED_FAIL",
- "BriefDescription": "Non TM St caused any thread to fail",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x3090",
"EventName": "PM_SWAP_CANCEL",
"BriefDescription": "SWAP cancel , rtag not available",
@@ -3624,18 +3084,6 @@
"PublicDescription": ""
},
{,
- "EventCode": "0x318082",
- "EventName": "PM_TM_CAM_OVERFLOW",
- "BriefDescription": "l3 tm cam overflow during L2 co of SC",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x74708c",
- "EventName": "PM_TM_CAP_OVERFLOW",
- "BriefDescription": "TM Footprint Capactiy Overflow",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20ba",
"EventName": "PM_TM_END_ALL",
"BriefDescription": "Tm any tend",
@@ -3690,48 +3138,6 @@
"PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
},
{,
- "EventCode": "0x727086",
- "EventName": "PM_TM_FAV_CAUSED_FAIL",
- "BriefDescription": "TM Load (fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x717082",
- "EventName": "PM_TM_LD_CAUSED_FAIL",
- "BriefDescription": "Non TM Ld caused any thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x727084",
- "EventName": "PM_TM_LD_CONF",
- "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x328086",
- "EventName": "PM_TM_RST_SC",
- "BriefDescription": "tm snp rst tm sc",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x318080",
- "EventName": "PM_TM_SC_CO",
- "BriefDescription": "l3 castout tm Sc line",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x73708a",
- "EventName": "PM_TM_ST_CAUSED_FAIL",
- "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
- "PublicDescription": ""
- },
- {,
- "EventCode": "0x737088",
- "EventName": "PM_TM_ST_CONF",
- "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
- "PublicDescription": ""
- },
- {,
"EventCode": "0x20bc",
"EventName": "PM_TM_TBEGIN",
"BriefDescription": "Tm nested tbegin",
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
new file mode 100644
index 000000000000..811c2a8c1c9e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -0,0 +1,1982 @@
+[
+ {
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_BR_PRED * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "br_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction per instruction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Count cache branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_CCACHE / PM_BR_PRED_CCACHE * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "ccache_misprediction_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_mispredict_rate_percent"
+ },
+ {
+ "BriefDescription": "Link stack branch misprediction",
+ "MetricExpr": "PM_BR_MPRED_LSTACK/ PM_BR_PRED_LSTACK * 100",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "lstack_misprediction_percent"
+ },
+ {
+ "BriefDescription": "% Branches Taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL * 100 / PM_BRU_FIN",
+ "MetricGroup": "branch_prediction",
+ "MetricName": "taken_branches_percent"
+ },
+ {
+ "BriefDescription": "Completion stall due to a Branch Unit",
+ "MetricExpr": "PM_CMPLU_STALL_BRU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "bru_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was routed to the crypto execution pipe and was waiting to finish",
+ "MetricExpr": "PM_CMPLU_STALL_CRYPTO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "crypto_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed the L1 and was waiting for the data to return from the nest",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dcache_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a multi-cycle instruction issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dflong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency decimal floating ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Decimal Floating Point execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_DFU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dfu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_distant_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L21_L31/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l21_l31_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_conflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_noconflict_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L2L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l2l3_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss resolving missed the L3",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_l3miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to cache miss that resolves in local memory",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_LMEM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_lmem_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
+ "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_non_local_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
+ "MetricExpr": "PM_CMPLU_STALL_DMISS_REMOTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dmiss_remote_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency double precision ops.",
+ "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_DPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "dplong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction is an EIEIO waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_EIEIO/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "eieio_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction suffered an ERAT miss and the EMQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_EMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "emq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that suffered a translation miss",
+ "MetricExpr": "PM_CMPLU_STALL_ERAT_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "erat_miss_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete",
+ "MetricExpr": "PM_CMPLU_STALL_EXCEPTION/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exception_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units for other reasons.",
+ "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to execution units (FXU/VSU/CRU)",
+ "MetricExpr": "PM_CMPLU_STALL_EXEC_UNIT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "exec_unit_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is not allowed to complete because any of the 4 threads in the same core suffered a flush, which blocks completion",
+ "MetricExpr": "PM_CMPLU_STALL_FLUSH_ANY_THREAD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "flush_any_thread_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency scalar fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_FXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Stalls due to short latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a scalar fixed point or CR instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_FXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "fxu_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "issue_hold_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied",
+ "MetricExpr": "PM_CMPLU_STALL_LARX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "larx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that hit on an older store and it was waiting for store data",
+ "MetricExpr": "PM_CMPLU_STALL_LHS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lhs_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that missed in the L1 and the LMQ was unable to accept this load miss request because it was full",
+ "MetricExpr": "PM_CMPLU_STALL_LMQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lmq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load instruction with all its dependencies satisfied just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LOAD_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "load_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load that was held in LSAQ because the LRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_full_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to LRQ miscellaneous reasons, lost arbitration to LMQ slot, bank collisions, set prediction cleanup, set prediction multihit and others",
+ "MetricExpr": "PM_CMPLU_STALL_LRQ_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_other_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lrq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a load or store that was held in LSAQ because an older instruction from SRQ or LRQ won arbitration to the LSU pipe when this instruction tried to launch",
+ "MetricExpr": "PM_CMPLU_STALL_LSAQ_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_arb_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsaq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was an LSU op (other than a load or a store) with all its dependencies met and just going through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall of one cycle because the LSU requested to flush the next iop in the sequence. It takes 1 cycle for the ISU to process this request before the LSU instruction is allowed to complete",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_FLUSH_NEXT/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_flush_next_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a mfspr instruction targeting an LSU SPR and it was waiting for the register data to be returned",
+ "MetricExpr": "PM_CMPLU_STALL_LSU_MFSPR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_mfspr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion LSU stall for other reasons",
+ "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall by LSU instruction",
+ "MetricExpr": "PM_CMPLU_STALL_LSU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "lsu_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the register and notifying the Effective Address Table (EAT)",
+ "MetricExpr": "PM_CMPLU_STALL_MTFPSCR/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "mtfpscr_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tbegin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tend and decrement the TEXASR nested level. This is a short delay",
+ "MetricExpr": "PM_CMPLU_STALL_NESTED_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nested_tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "nothing_dispatched_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was one that must finish at dispatch.",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_DISP_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_disp_fin_stall_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. This event is used to account for cycles in which work is being completed in the CPI stack",
+ "MetricExpr": "PM_NTC_FIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_fin_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to ntc flush",
+ "MetricExpr": "PM_CMPLU_STALL_NTC_FLUSH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_flush_stall_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because it lost arbitration onto the issue pipe to another instruction (from the same thread or a different thread)",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_arb_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch because there are no slots in the DARQ for it",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_DARQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_darq_full_cpi"
+ },
+ {
+ "BriefDescription": "The NTC instruction is being held at dispatch during regular pipeline cycles, or because the VSU is busy with multi-cycle instructions, or because of a write-back collision with VSU",
+ "MetricExpr": "PM_NTC_ISSUE_HELD_OTHER/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ntc_issue_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles unaccounted for.",
+ "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall for other reasons",
+ "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a paste waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_PASTE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "paste_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was issued to the Permute execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_PM/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "pm_stall_cpi"
+ },
+ {
+ "BriefDescription": "Run cycles per run instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cpi"
+ },
+ {
+ "BriefDescription": "Run_cycles",
+ "MetricExpr": "PM_RUN_CYC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "run_cyc_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "scalar_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was awaiting L2 response for an SLB",
+ "MetricExpr": "PM_CMPLU_STALL_SLB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "slb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall while waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPLU_STALL_SPEC_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "spec_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store that was held in LSAQ because the SRQ was full",
+ "MetricExpr": "PM_CMPLU_STALL_SRQ_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_full_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "srq_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to store forward",
+ "MetricExpr": "PM_CMPLU_STALL_ST_FWD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "st_fwd_stall_cpi"
+ },
+ {
+ "BriefDescription": "Nothing completed and ICT not empty",
+ "MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a stcx waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_STCX/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "stcx_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the next to finish instruction was a store waiting on data",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_DATA/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_data_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for a slot in the store finish pipe. This means the instruction is ready to finish but there are instructions ahead of it, using the finish pipe",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FIN_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_fin_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store with all its dependencies met, just waiting to go through the LSU pipe to finish",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_FINISH/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_finish_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a store waiting for the next relaunch opportunity after an internal reject. This means the instruction is ready to relaunch and tried once but lost arbitration",
+ "MetricExpr": "PM_CMPLU_STALL_STORE_PIPE_ARB/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "store_pipe_arb_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tend instruction awaiting response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TEND/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tend_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion Stalled because the thread was blocked",
+ "MetricExpr": "PM_CMPLU_STALL_THRD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "thread_block_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a tlbie waiting for response from L2",
+ "MetricExpr": "PM_CMPLU_STALL_TLBIE/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "tlbie_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency double precision ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a vector instruction issued to the Double Precision execution pipe and waiting to finish.",
+ "MetricExpr": "PM_CMPLU_STALL_VDP/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdp_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall because the NTF instruction was a scalar multi-cycle instruction issued to the Double Precision execution pipe and waiting to finish. Includes binary floating point instructions in 32 and 64 bit binary floating point format.",
+ "MetricExpr": "PM_CMPLU_STALL_VDPLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vdplong_stall_cpi"
+ },
+ {
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vector_stall_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall due to a long latency vector fixed point instruction (division, square root)",
+ "MetricExpr": "PM_CMPLU_STALL_VFXLONG/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxlong_stall_cpi"
+ },
+ {
+ "BriefDescription": "Vector stalls due to small latency integer ops",
+ "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_other_stall_cpi"
+ },
+ {
+ "BriefDescription": "Finish stall due to a vector fixed point instruction in the execution pipeline. These instructions get routed to the ALU, ALU2, and DIV pipes",
+ "MetricExpr": "PM_CMPLU_STALL_VFXU/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "vfxu_stall_cpi"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 M state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3 S state, other core per Inst",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from the L3 and were brought into the L3 by a prefetch, per instruction completed",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_mepf_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from sources beyond the local L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3 per Inst",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "dl1_reload_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 demand load misses per run instruction",
+ "MetricExpr": "PM_LD_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "dl1_reloads_percent_per_inst",
+ "MetricName": "l1_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 misses that result in a cache reload",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID * 100 / PM_LD_MISS_L1",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_miss_reloads_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Distant Memory",
+ "MetricExpr": "PM_DATA_FROM_DMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L2, other core",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from L2",
+ "MetricExpr": "PM_DATA_FROM_L2 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads that came from L3 and were brought into the L3 by a prefetch",
+ "MetricExpr": "PM_DATA_FROM_L3_MEPF * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_mepf_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from sources beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_miss_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from L3",
+ "MetricExpr": "PM_DATA_FROM_L3 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Local Memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Remote Memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricGroup": "dl1_reloads_percent_per_ref",
+ "MetricName": "dl1_reload_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant MOD miss rates with measured DL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_MOD * PM_MRK_DATA_FROM_DL2L3_MOD_CYC / PM_MRK_DATA_FROM_DL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 distant SHR miss rates with measured DL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL2L3_SHR * PM_MRK_DATA_FROM_DL2L3_SHR_CYC / PM_MRK_DATA_FROM_DL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant L4 miss rates with measured DL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DL4 * PM_MRK_DATA_FROM_DL4_CYC / PM_MRK_DATA_FROM_DL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of distant memory miss rates with measured DMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_DMEM * PM_MRK_DATA_FROM_DMEM_CYC / PM_MRK_DATA_FROM_DMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "dmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 MOD miss rates with measured L21 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_MOD * PM_MRK_DATA_FROM_L21_MOD_CYC / PM_MRK_DATA_FROM_L21_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl21 SHR miss rates with measured L21 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L21_SHR * PM_MRK_DATA_FROM_L21_SHR_CYC / PM_MRK_DATA_FROM_L21_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l21_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2 miss rates with measured L2 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L2 * PM_MRK_DATA_FROM_L2_CYC / PM_MRK_DATA_FROM_L2 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l2_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 MOD miss rates with measured L31 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_MOD * PM_MRK_DATA_FROM_L31_MOD_CYC / PM_MRK_DATA_FROM_L31_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl31 SHR miss rates with measured L31 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L31_SHR * PM_MRK_DATA_FROM_L31_SHR_CYC / PM_MRK_DATA_FROM_L31_SHR / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l31_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl3 miss rates with measured L3 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_L3 * PM_MRK_DATA_FROM_L3_CYC / PM_MRK_DATA_FROM_L3 / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "l3_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of Local memory miss rates with measured LMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_LMEM * PM_MRK_DATA_FROM_LMEM_CYC / PM_MRK_DATA_FROM_LMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "lmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 remote MOD miss rates with measured RL2L3 MOD latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_MOD * PM_MRK_DATA_FROM_RL2L3_MOD_CYC / PM_MRK_DATA_FROM_RL2L3_MOD / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_mod_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of dl2l3 shared miss rates with measured RL2L3 SHR latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL2L3_SHR * PM_MRK_DATA_FROM_RL2L3_SHR_CYC / PM_MRK_DATA_FROM_RL2L3_SHR / PM_CMPLU_STALL_DCACHE_MISS * 100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl2l3_shr_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote L4 miss rates with measured RL4 latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RL4 * PM_MRK_DATA_FROM_RL4_CYC / PM_MRK_DATA_FROM_RL4 / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rl4_cpi_percent"
+ },
+ {
+ "BriefDescription": "estimate of remote memory miss rates with measured RMEM latency as a % of dcache miss cpi",
+ "MetricExpr": "PM_DATA_FROM_RMEM * PM_MRK_DATA_FROM_RMEM_CYC / PM_MRK_DATA_FROM_RMEM / PM_CMPLU_STALL_DCACHE_MISS *100",
+ "MetricGroup": "estimated_dcache_miss_cpi",
+ "MetricName": "rmem_cpi_percent"
+ },
+ {
+ "BriefDescription": "Branch Mispredict flushes per instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "general",
+ "MetricName": "br_mpred_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "cpi"
+ },
+ {
+ "BriefDescription": "Dispatch flush rate per run instruction (%)",
+ "MetricExpr": "(PM_FLUSH_DISP / PM_RUN_INST_CMPL) * 100",
+ "MetricGroup": "general",
+ "MetricName": "disp_flush_rate_percent"
+ },
+ {
+ "BriefDescription": "% DTLB miss rate per inst",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "dtlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Flush rate (%)",
+ "MetricExpr": "PM_FLUSH * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "flush_rate_percent"
+ },
+ {
+ "BriefDescription": "Instructions per cycle",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "general",
+ "MetricName": "ipc"
+ },
+ {
+ "BriefDescription": "% ITLB miss rate per inst",
+ "MetricExpr": "PM_ITLB_MISS / PM_RUN_INST_CMPL *100",
+ "MetricGroup": "general",
+ "MetricName": "itlb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 load misses per L1 load ref",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per run instruction",
+ "MetricExpr": "PM_ST_MISS_L1 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of L1 store misses per L1 store ref",
+ "MetricExpr": "PM_ST_MISS_L1 / PM_ST_FIN * 100",
+ "MetricGroup": "general",
+ "MetricName": "l1_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "L2 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L2MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l2_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 Instruction Miss Rate (per instruction)(%)",
+ "MetricExpr": "PM_INST_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 demand Load Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DATA_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_ld_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "L3 PTEG Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_DPTEG_FROM_L3MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "l3_pteg_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Percentage of cycles that are run cycles",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC*100",
+ "MetricGroup": "general",
+ "MetricName": "run_cycles_percent"
+ },
+ {
+ "BriefDescription": "Instruction dispatch-to-completion ratio",
+ "MetricExpr": "PM_INST_DISP / PM_INST_CMPL",
+ "MetricGroup": "general",
+ "MetricName": "speculation"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2 per Inst",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3 other core per Inst",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3 per Inst",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared) per Inst",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory per Inst",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "inst_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Instruction Cache Miss Rate (Per run Instruction)(%)",
+ "MetricExpr": "PM_L1_ICACHE_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "instruction_misses_percent_per_inst",
+ "MetricName": "l1_inst_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "Icache Fetches per Icache Miss",
+ "MetricExpr": "(PM_L1_ICACHE_MISS - PM_IC_PREF_WRITE) / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_miss_reload"
+ },
+ {
+ "BriefDescription": "% of ICache reloads due to prefetch",
+ "MetricExpr": "PM_IC_PREF_WRITE * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "icache_pref_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_DL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant L4",
+ "MetricExpr": "PM_INST_FROM_DL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Distant Memory",
+ "MetricExpr": "PM_INST_FROM_DMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L2, other core",
+ "MetricExpr": "PM_INST_FROM_L21_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L2",
+ "MetricExpr": "PM_INST_FROM_L2 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Private L3, other core",
+ "MetricExpr": "PM_INST_FROM_L31_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from L3",
+ "MetricExpr": "PM_INST_FROM_L3 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local L4",
+ "MetricExpr": "PM_INST_FROM_LL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Local Memory",
+ "MetricExpr": "PM_INST_FROM_LMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_MOD * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_INST_FROM_RL2L3_SHR * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote L4",
+ "MetricExpr": "PM_INST_FROM_RL4 * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of ICache reloads from Remote Memory",
+ "MetricExpr": "PM_INST_FROM_RMEM * 100 / PM_L1_ICACHE_MISS",
+ "MetricGroup": "instruction_stats_percent_per_ref",
+ "MetricName": "inst_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_co_m_rd_util"
+ },
+ {
+ "BriefDescription": "L2 dcache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dc_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
+ "MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_dem_ld_disp_percent"
+ },
+ {
+ "BriefDescription": "L2 Icache invalidates per run inst (per core)",
+ "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ic_inv_rate_percent"
+ },
+ {
+ "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
+ "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_inst_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load hits",
+ "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L2 Load misses",
+ "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
+ "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ld_rd_util"
+ },
+ {
+ "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_ldmiss_wr_util"
+ },
+ {
+ "BriefDescription": "L2 local pump prediction success",
+ "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_local_pred_correct_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_mod_co_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch attempts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_ld_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch attempts that failed because of address collisions and cclass conflicts",
+ "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_addr_fail_percent"
+ },
+ {
+ "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
+ "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rc_st_disp_fail_percent"
+ },
+ {
+ "BriefDescription": "L2 Cache Read Utilization (per core)",
+ "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_rd_util_percent"
+ },
+ {
+ "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
+ "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_shr_co_percent"
+ },
+ {
+ "BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
+ "MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_miss_ratio_percent"
+ },
+ {
+ "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
+ "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_rd_util"
+ },
+ {
+ "BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
+ "MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_st_wr_util"
+ },
+ {
+ "BriefDescription": "L2 Cache Write Utilization (per core)",
+ "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
+ "MetricGroup": "l2_stats",
+ "MetricName": "l2_wr_util_percent"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load hits",
+ "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_hit_frequency"
+ },
+ {
+ "BriefDescription": "Average number of cycles between L3 Load misses",
+ "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_ld_miss_frequency"
+ },
+ {
+ "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
+ "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
+ "MetricGroup": "l3_stats",
+ "MetricName": "l3_wi_usage"
+ },
+ {
+ "BriefDescription": "Average icache miss latency",
+ "MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
+ "MetricGroup": "latency",
+ "MetricName": "average_il1_miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC/ PM_MRK_DATA_FROM_DL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 distant Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC/ PM_MRK_DATA_FROM_DL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "dl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Distant L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DL4_CYC/ PM_MRK_DATA_FROM_DL4",
+ "MetricGroup": "latency",
+ "MetricName": "dl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Dmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_DMEM_CYC/ PM_MRK_DATA_FROM_DMEM",
+ "MetricGroup": "latency",
+ "MetricName": "dmem_latency"
+ },
+ {
+ "BriefDescription": "average L1 miss latency using marked events",
+ "MetricExpr": "PM_MRK_LD_MISS_L1_CYC / PM_MRK_LD_MISS_L1",
+ "MetricGroup": "latency",
+ "MetricName": "estimated_dl1miss_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_MOD_CYC/ PM_MRK_DATA_FROM_L21_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l21_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L21 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L21_SHR_CYC/ PM_MRK_DATA_FROM_L21_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l21_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L2 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L2_CYC/ PM_MRK_DATA_FROM_L2",
+ "MetricGroup": "latency",
+ "MetricName": "l2_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_MOD_CYC/ PM_MRK_DATA_FROM_L31_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "l31_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L31_SHR_CYC/ PM_MRK_DATA_FROM_L31_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "l31_shr_latency"
+ },
+ {
+ "BriefDescription": "Marked L3 Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_L3_CYC/ PM_MRK_DATA_FROM_L3",
+ "MetricGroup": "latency",
+ "MetricName": "l3_latency"
+ },
+ {
+ "BriefDescription": "Local L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LL4_CYC/ PM_MRK_DATA_FROM_LL4",
+ "MetricGroup": "latency",
+ "MetricName": "ll4_latency"
+ },
+ {
+ "BriefDescription": "Marked Lmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_LMEM_CYC/ PM_MRK_DATA_FROM_LMEM",
+ "MetricGroup": "latency",
+ "MetricName": "lmem_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC/ PM_MRK_DATA_FROM_RL2L3_MOD",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_mod_latency"
+ },
+ {
+ "BriefDescription": "Marked L2L3 remote Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC/ PM_MRK_DATA_FROM_RL2L3_SHR",
+ "MetricGroup": "latency",
+ "MetricName": "rl2l3_shr_latency"
+ },
+ {
+ "BriefDescription": "Remote L4 average load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RL4_CYC/ PM_MRK_DATA_FROM_RL4",
+ "MetricGroup": "latency",
+ "MetricName": "rl4_latency"
+ },
+ {
+ "BriefDescription": "Marked Rmem Load latency",
+ "MetricExpr": "PM_MRK_DATA_FROM_RMEM_CYC/ PM_MRK_DATA_FROM_RMEM",
+ "MetricGroup": "latency",
+ "MetricName": "rmem_latency"
+ },
+ {
+ "BriefDescription": "ERAT miss reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_ERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "erat_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LHS reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LHS *100/ PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lhs_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject rate",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_rate_percent"
+ },
+ {
+ "BriefDescription": "LMQ full reject ratio",
+ "MetricExpr": "PM_LSU_REJECT_LMQ_FULL * 100 / PM_LD_REF_L1",
+ "MetricGroup": "lsu_rejects",
+ "MetricName": "lmq_full_reject_ratio_percent"
+ },
+ {
+ "BriefDescription": "L4 locality (%)",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / (PM_DATA_FROM_LL4 + PM_DATA_FROM_RL4 + PM_DATA_FROM_DL4)",
+ "MetricGroup": "memory",
+ "MetricName": "l4_locality"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_DL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote+distant L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / (PM_DATA_FROM_DL4 + PM_DATA_FROM_RL4)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Ratio of reloads from local L4 to remote L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 / PM_DATA_FROM_RL4",
+ "MetricGroup": "memory",
+ "MetricName": "ld_ll4_per_ld_rl4"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote and distant memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / (PM_DATA_FROM_DMEM + PM_DATA_FROM_RMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_mem"
+ },
+ {
+ "BriefDescription": "Number of loads from local memory per loads from remote memory",
+ "MetricExpr": "PM_DATA_FROM_LMEM / PM_DATA_FROM_RMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_lmem_per_ld_rmem"
+ },
+ {
+ "BriefDescription": "Number of loads from remote memory per loads from distant memory",
+ "MetricExpr": "PM_DATA_FROM_RMEM / PM_DATA_FROM_DMEM",
+ "MetricGroup": "memory",
+ "MetricName": "ld_rmem_per_ld_dmem"
+ },
+ {
+ "BriefDescription": "Memory locality",
+ "MetricExpr": "PM_DATA_FROM_LMEM * 100/ (PM_DATA_FROM_LMEM + PM_DATA_FROM_RMEM + PM_DATA_FROM_DMEM)",
+ "MetricGroup": "memory",
+ "MetricName": "mem_locality_percent"
+ },
+ {
+ "BriefDescription": "L1 Prefetches issued by the prefetch machine per instruction (per thread)",
+ "MetricExpr": "PM_L1_PREF / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "prefetch",
+ "MetricName": "l1_prefetch_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT Miss Rate (per run instruction)(%)",
+ "MetricExpr": "PM_LSU_DERAT_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "derat_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_dmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l21_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l31_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_mod_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared) per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl2l3_shr_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4 per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory per inst",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "pteg_reloads_percent_per_inst",
+ "MetricName": "pteg_from_rmem_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT misses that result in an ERAT reload",
+ "MetricExpr": "PM_DTLB_MISS * 100 / PM_LSU_DERAT_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "derat_miss_reload_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_DL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant L4",
+ "MetricExpr": "PM_DPTEG_FROM_DL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Distant Memory",
+ "MetricExpr": "PM_DPTEG_FROM_DMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_dmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L2, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L21_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l21_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l2_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Private L3, other core",
+ "MetricExpr": "PM_DPTEG_FROM_L31_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l31_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_l3_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local L4",
+ "MetricExpr": "PM_DPTEG_FROM_LL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Local Memory",
+ "MetricExpr": "PM_DPTEG_FROM_LMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_lmem_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Modified)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_MOD * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_mod_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L2 or L3 (Shared)",
+ "MetricExpr": "PM_DPTEG_FROM_RL2L3_SHR * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl2l3_shr_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote L4",
+ "MetricExpr": "PM_DPTEG_FROM_RL4 * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DERAT reloads from Remote Memory",
+ "MetricExpr": "PM_DPTEG_FROM_RMEM * 100 / PM_DTLB_MISS",
+ "MetricGroup": "pteg_reloads_percent_per_ref",
+ "MetricName": "pteg_from_rmem_percent"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 4K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_4K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_4k_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DERAT miss rate for 64K page per inst",
+ "MetricExpr": "PM_DERAT_MISS_64K * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_64k_miss_ratio"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio",
+ "MetricExpr": "PM_LSU_DERAT_MISS / PM_LSU_DERAT_MISS",
+ "MetricGroup": "translation",
+ "MetricName": "derat_miss_ratio"
+ },
+ {
+ "BriefDescription": "% DSLB miss rate per inst",
+ "MetricExpr": "PM_DSLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "dslb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "% ISLB miss rate per inst",
+ "MetricExpr": "PM_ISLB_MISS * 100 / PM_RUN_INST_CMPL",
+ "MetricGroup": "translation",
+ "MetricName": "islb_miss_rate_percent"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_ANY_SYNC / PM_RUN_INST_CMPL",
+ "MetricName": "any_sync_stall_cpi"
+ },
+ {
+ "BriefDescription": "Average number of instructions completed per cycle in which at least one instruction completed",
+ "MetricExpr": "PM_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricName": "average_completed_instruction_set_size"
+ },
+ {
+ "BriefDescription": "Branches per instruction",
+ "MetricExpr": "PM_BRU_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "branches_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles in which at least one instruction completed in this thread, per run instruction",
+ "MetricExpr": "PM_1PLUS_PPC_CMPL/PM_RUN_INST_CMPL",
+ "MetricName": "completion_cpi"
+ },
+ {
+ "BriefDescription": "cycles",
+ "MetricExpr": "PM_RUN_CYC",
+ "MetricName": "custom_secs"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in which at least one instruction was dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
+ "MetricName": "cycles_atleast_one_inst_dispatched_percent"
+ },
+ {
+ "BriefDescription": "Cycles per instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricName": "cycles_per_completed_instructions_set"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Distant L4",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_dl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Distant L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_DL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_dl4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
+ "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_l31_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Local L4",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_ll4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Local L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 dL1_Reloads from Remote L4",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_L1_DCACHE_RELOAD_VALID",
+ "MetricName": "dl1_reload_from_rl4_percent"
+ },
+ {
+ "BriefDescription": "% of DL1 Reloads from Remote L4 per Inst",
+ "MetricExpr": "PM_DATA_FROM_RL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dl1_reload_from_rl4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L2",
+ "MetricExpr": "PM_DPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of DERAT reloads from L3",
+ "MetricExpr": "PM_DPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "dpteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Cycles in which the oldest instruction is finished and ready to complete but waiting to get through the completion pipe",
+ "MetricExpr": "PM_NTC_ALL_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "finish_to_cmpl_cpi"
+ },
+ {
+ "BriefDescription": "Total Fixed point operations",
+ "MetricExpr": "PM_FXU_FIN/PM_RUN_INST_CMPL",
+ "MetricName": "fixed_per_inst"
+ },
+ {
+ "BriefDescription": "All FXU Busy",
+ "MetricExpr": "PM_FXU_BUSY / PM_CYC",
+ "MetricName": "fxu_all_busy"
+ },
+ {
+ "BriefDescription": "All FXU Idle",
+ "MetricExpr": "PM_FXU_IDLE / PM_CYC",
+ "MetricName": "fxu_all_idle"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "ICT other stalls",
+ "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Ict empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L2",
+ "MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l2_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from L3",
+ "MetricExpr": "PM_IPTEG_FROM_L3 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_l3_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local L4",
+ "MetricExpr": "PM_IPTEG_FROM_LL4 * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_ll4_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of IERAT reloads from local memory",
+ "MetricExpr": "PM_IPTEG_FROM_LMEM * 100 / PM_RUN_INST_CMPL",
+ "MetricName": "ipteg_from_lmem_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Castout machines used. 1 of 16 CO machines is sampled every L2 cycle",
+ "MetricExpr": "PM_CO_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_co_usage"
+ },
+ {
+ "BriefDescription": "Percent of instruction reads out of all L2 commands",
+ "MetricExpr": "PM_ISIDE_DISP * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_instr_commands_percent"
+ },
+ {
+ "BriefDescription": "Percent of loads out of all L2 commands",
+ "MetricExpr": "PM_L2_LD * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_ld_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_rc_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Average number of Read/Claim machines used. 1 of 16 RC machines is sampled every L2 cycle",
+ "MetricExpr": "PM_RC_USAGE / PM_RUN_CYC * 16",
+ "MetricName": "l2_rc_usage"
+ },
+ {
+ "BriefDescription": "Average number of Snoop machines used. 1 of 8 SN machines is sampled every L2 cycle",
+ "MetricExpr": "PM_SN_USAGE / PM_RUN_CYC * 8",
+ "MetricName": "l2_sn_usage"
+ },
+ {
+ "BriefDescription": "Percent of stores out of all L2 commands",
+ "MetricExpr": "PM_L2_ST * 100 / (PM_L2_ST + PM_L2_LD + PM_ISIDE_DISP)",
+ "MetricName": "l2_st_commands_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 store dispatches that failed per core",
+ "MetricExpr": "100 * (PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_fail_rate_percent"
+ },
+ {
+ "BriefDescription": "Rate of L2 dispatches per core",
+ "MetricExpr": "100 * PM_L2_RCST_DISP/2 / PM_RUN_INST_CMPL",
+ "MetricName": "l2_st_disp_rate_percent"
+ },
+ {
+ "BriefDescription": "Marked L31 Load latency",
+ "MetricExpr": "(PM_MRK_DATA_FROM_L31_SHR_CYC + PM_MRK_DATA_FROM_L31_MOD_CYC) / (PM_MRK_DATA_FROM_L31_SHR + PM_MRK_DATA_FROM_L31_MOD)",
+ "MetricName": "l31_latency"
+ },
+ {
+ "BriefDescription": "PCT instruction loads",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricName": "loads_per_inst"
+ },
+ {
+ "BriefDescription": "Cycles stalled by D-Cache Misses",
+ "MetricExpr": "PM_CMPLU_STALL_DCACHE_MISS / PM_RUN_INST_CMPL",
+ "MetricName": "lsu_stall_dcache_miss_cpi"
+ },
+ {
+ "BriefDescription": "Completion stall because a different thread was using the completion pipe",
+ "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+ "MetricName": "other_thread_cmpl_stall"
+ },
+ {
+ "BriefDescription": "PCT instruction stores",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricName": "stores_per_inst"
+ },
+ {
+ "BriefDescription": "ANY_SYNC_STALL_CPI",
+ "MetricExpr": "PM_CMPLU_STALL_SYNC_PMU_INT / PM_RUN_INST_CMPL",
+ "MetricName": "sync_pmu_int_stall_cpi"
+ }
+]
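Each "MetricExpr" in the file above is plain arithmetic over raw PMU counter names, usually normalized by PM_RUN_INST_CMPL or PM_CYC to give a CPI component or a percentage. As a minimal sketch of how such an expression could be evaluated once the raw counts are in hand (not part of the patch; the counter values and the tiny evaluator below are invented for illustration, perf uses its own metric engine):

# Evaluate a few of the MetricExpr strings from the file above.
# The counter values are made up; in practice they come from 'perf stat'.
counts = {
    "PM_CYC": 1_200_000,
    "PM_INST_CMPL": 800_000,
    "PM_RUN_INST_CMPL": 790_000,
    "PM_RUN_CYC": 1_150_000,
    "PM_CMPLU_STALL_VDP": 40_000,
}

def evaluate(expr, counts):
    # The expressions are valid Python arithmetic, so substituting the
    # counter dict as the evaluation namespace is enough for a sketch.
    return eval(expr, {"__builtins__": {}}, counts)

metrics = {
    "cpi": "PM_CYC / PM_INST_CMPL",
    "run_cycles_percent": "PM_RUN_CYC / PM_CYC*100",
    "vdp_stall_cpi": "PM_CMPLU_STALL_VDP/PM_RUN_INST_CMPL",
}

for name, expr in metrics.items():
    print(f"{name:20s} = {evaluate(expr, counts):.3f}")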
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index e7a3524b748f..68618152ea2c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -4,7 +4,7 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644
index 000000000000..93ddfd8053ca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
@@ -0,0 +1,12 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 BTB Correction."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 BTB Correction."
+ }
+]
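A rough usage sketch for the two BTB-correction events above: the ratio against retired branches is an assumed, illustrative derived figure (not a metric shipped with these files), and the counts are placeholders; ex_ret_brn is defined in core.json further down.

    # Hypothetical post-processing of `perf stat -e ...` output; values are placeholders.
    bp_l1_btb_correct = 40_000
    bp_l2_btb_correct = 9_000
    ex_ret_brn = 2_500_000          # retired branches, defined in core.json below

    # BTB corrections per 1000 retired branches (illustrative derived ratio).
    corrections_pkb = (bp_l1_btb_correct + bp_l2_btb_correct) / ex_ret_brn * 1000
    print(f"BTB corrections per kilo-branch: {corrections_pkb:.2f}")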
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644
index 000000000000..fad4af9142cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response.",
+ "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "Requests to L2 Group1.",
+ "PublicDescription": "Requests to L2 Group1.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g1.other_requests",
+ "EventCode": "0x60",
+ "BriefDescription": "Events covered by l2_request_g2.",
+ "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "All Group 1 commands not in unit0.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSized, RdSized32, RdSized64.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "BriefDescription": "LS to L2 WCB write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests.",
+ "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
+ "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "LS ReadBlock C/S Hit.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block L Hit X.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkL Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "LS Read Block C S L X Change to X Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Exclusive Stale.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Hit Shared.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "IC Fill Miss.",
+ "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x1"
+ }
+]
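The l2_latency.l2_cycles_waiting_on_fills description above implies a simple average-latency calculation, spelled out below with placeholder counts. Which per-thread fill counter to divide by is an assumption here; any event counting L2 fills from both threads would serve.

    # Placeholder counts from both hardware threads of one core.
    l2_cycles_waiting_on_fills = 300_000   # event value (hardware already divided by four)
    l2_fills_thread0 = 8_000               # assumed per-thread L2 fill counts
    l2_fills_thread1 = 7_000

    waiting_cycles = l2_cycles_waiting_on_fills * 4       # undo the divide-by-four
    fills = l2_fills_thread0 + l2_fills_thread1           # fills from both threads
    avg_fill_latency = waiting_cycles / fills
    print(f"average L2 fill latency ~ {avg_fill_latency:.1f} cycles")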
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644
index 000000000000..7b285b0a7f35
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
@@ -0,0 +1,134 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_cops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Uops.",
+ "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "[Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_ret_cond_misp",
+ "EventCode": "0xd2",
+ "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS that retired.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Number of Ops tagged by IBS.",
+ "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_fus_brnch_inst",
+ "EventCode": "0x1d0",
+ "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
+ }
+]
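Two common ratios fall straight out of the retired-instruction events above; the sketch below computes them from placeholder counts (the counts themselves are not measurements, and the ratios are illustrative rather than metrics shipped with this file).

    # Placeholder counts for the ex_ret_* events defined above.
    ex_ret_instr = 10_000_000
    ex_ret_cops = 11_500_000
    ex_ret_brn = 1_800_000
    ex_ret_brn_misp = 27_000

    upi = ex_ret_cops / ex_ret_instr                     # retired uops per instruction
    br_misp_rate = ex_ret_brn_misp / ex_ret_brn * 100    # % of retired branches mispredicted
    print(f"uops/inst = {upi:.2f}, branch mispredict rate = {br_misp_rate:.2f}%")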
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644
index 000000000000..ea4711983d1d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
@@ -0,0 +1,168 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.dual",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number multi-pipe uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps.",
+ "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
+ "UMask": "0xf"
+ },
+ {
+ "EventName": "fp_sched_empty",
+ "EventCode": "0x01",
+ "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.all",
+ "EventCode": "0x02",
+ "BriefDescription": "All Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
+ "UMask": "0x7"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Divide and square root Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.mul_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Multiply Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retx87_fp_ops.add_sub_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Add/subtract Ops.",
+ "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Double precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision divide/square root FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision multiply FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Single-precision add/subtract FLOPS.",
+ "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops.",
+ "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE bottom-executing uOps retired.",
+ "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
+ "UMask": "0x1"
+ }
+]
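Since fp_ret_sse_avx_ops.all counts retired SSE/AVX floating-point operations, dividing it by wall-clock time gives a FLOP rate. A minimal sketch with placeholder numbers:

    # Placeholder event count and elapsed time; not real measurements.
    fp_ret_sse_avx_ops_all = 48_000_000_000   # retired SSE/AVX FLOPs
    elapsed_seconds = 2.5

    gflops = fp_ret_sse_avx_ops_all / elapsed_seconds / 1e9
    print(f"~{gflops:.1f} GFLOPS retired")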
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644
index 000000000000..fa2d60d4def0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
@@ -0,0 +1,162 @@
+[
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "PublicDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Stores.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "PublicDescription": "L1 DTLB Miss or Reload off all sizes.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
+ "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0xc"
+ },
+ {
+ "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Tablewalker allocation.",
+ "PublicDescription": "Tablewalker allocation.",
+ "UMask": "0x3"
+ },
+ {
+ "EventName": "ls_misal_accesses",
+ "EventCode": "0x47",
+ "BriefDescription": "Misaligned loads."
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.store_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.load_prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
+ "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ }
+]
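A quick, approximate way to read the DTLB events above is as a miss rate against data-cache accesses; note ls_dc_accesses counts eight-byte accesses, so treat the ratio as a rough per-access rate. Counts below are placeholders.

    # Placeholder counts; this ratio is an illustrative derived figure.
    ls_dc_accesses = 50_000_000
    ls_l1_d_tlb_miss_all = 600_000

    dtlb_miss_rate = ls_l1_d_tlb_miss_all / ls_dc_accesses * 100
    print(f"approx L1 DTLB miss rate: {dtlb_miss_rate:.2f}% of data-cache accesses")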
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644
index 000000000000..b26a00d05a2e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
@@ -0,0 +1,65 @@
+[
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC to IC mode switch.",
+ "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "IC to OC mode switch.",
+ "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "RETIRE Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "AGSQ Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALU tokens total unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
+ "UMask": "0x8"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 3 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
+ "UMask": "0x4"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 2 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
+ "UMask": "0x2"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "ALSQ 1 Tokens unavailable.",
+ "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
+ "UMask": "0x1"
+ }
+]
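The de_dis_dispatch_token_stalls0.* umasks split dispatch-token stalls by reason, so a per-reason share is easy to derive. The breakdown below is an illustrative view over placeholder counts, not a metric shipped with this file.

    # Placeholder per-reason stall-cycle counts for de_dis_dispatch_token_stalls0.*.
    stalls = {
        "retire_token_stall": 120_000,
        "agsq_token_stall":   300_000,
        "alu_token_stall":    450_000,
        "alsq1_token_stall":   80_000,
    }
    total = sum(stalls.values())
    for reason, cycles in stalls.items():
        print(f"{reason:20s} {cycles / total * 100:5.1f}% of token-stall cycles")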
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index 935b7dcf067d..ef69540ab61d 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -77,7 +77,7 @@
"UMask": "0x1",
"EventName": "UOPS.MS_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ. ",
+ "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
"CounterMask": "1"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index b2e681c78466..09c6de13de20 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -189,7 +189,7 @@
"UMask": "0x8",
"EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect. "
+ "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect."
},
{
"EventCode": "0x89",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 00bfdb5c5acb..212b117a8ffb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -1,164 +1,352 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
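The "MetricExpr" strings in the array above are plain arithmetic over perf event counts: weighted counter sums divided by cycles or by the sampling interval. As a minimal sketch (not how perf itself evaluates them), the GFLOPs and L1D_Cache_Fill_BW expressions from this file can be reproduced by hand in Python; the event names below come from the expressions above, while the counter readings and the one-second duration are purely illustrative assumptions:

```python
# Minimal sketch: evaluate two of the metric expressions above by hand.
# Counter values are invented for illustration; in practice they come
# from a perf run over the corresponding events on Broadwell.
counters = {
    "FP_ARITH_INST_RETIRED.SCALAR_SINGLE":      1_000_000,
    "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE":      2_000_000,
    "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE":   500_000,
    "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE":   400_000,
    "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE":   300_000,
    "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE":   200_000,
    "L1D.REPLACEMENT":                          3_000_000,
}
duration_time = 1.0  # seconds, matching duration_time in the expressions

# "GFLOPs": scalar ops count once, 128b packed doubles twice,
# 128b packed singles / 256b packed doubles four times, and so on.
flops = (1 * (counters["FP_ARITH_INST_RETIRED.SCALAR_SINGLE"]
              + counters["FP_ARITH_INST_RETIRED.SCALAR_DOUBLE"])
         + 2 * counters["FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE"]
         + 4 * (counters["FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE"]
                + counters["FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE"])
         + 8 * counters["FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE"])
gflops = flops / 1e9 / duration_time

# "L1D_Cache_Fill_BW": 64-byte cache lines filled into L1D per second, in GB/s.
l1d_fill_bw = 64 * counters["L1D.REPLACEMENT"] / 1e9 / duration_time

print(f"GFLOPs            ~ {gflops:.3f}")
print(f"L1D_Cache_Fill_BW ~ {l1d_fill_bw:.3f} GB/s")
```

In normal use these expressions are not computed by hand; perf resolves them from this JSON when the metric is requested by name, so the sketch only illustrates the weighting and normalization encoded in the strings above.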
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 0b080b0352d8..7938bf5689ab 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -56,10 +56,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -68,7 +68,7 @@
{
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache.",
@@ -77,7 +77,7 @@
{
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -87,7 +87,7 @@
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x50",
+ "UMask": "0xd0",
"EventName": "L2_RQSTS.L2_PF_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
@@ -433,7 +433,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -445,7 +445,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
@@ -771,2628 +771,2628 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that have any response type.",
+ "BriefDescription": "Counts demand data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001 ",
+ "MSRValue": "0x0080020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001 ",
+ "MSRValue": "0x0100020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001 ",
+ "MSRValue": "0x0200020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001 ",
+ "MSRValue": "0x0400020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001 ",
+ "MSRValue": "0x1000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020001 ",
+ "MSRValue": "0x3F80020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0001 ",
+ "MSRValue": "0x00803C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0001 ",
+ "MSRValue": "0x01003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0001 ",
+ "MSRValue": "0x02003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0001 ",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001 ",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001 ",
+ "MSRValue": "0x3F803C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002 ",
+ "MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0002 ",
+ "MSRValue": "0x00803C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0002 ",
+ "MSRValue": "0x01003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0002 ",
+ "MSRValue": "0x02003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0002 ",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002 ",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0002 ",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004 ",
+ "MSRValue": "0x0000010004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that have any response type.",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004 ",
+ "MSRValue": "0x0080020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004 ",
+ "MSRValue": "0x0100020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004 ",
+ "MSRValue": "0x0200020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004 ",
+ "MSRValue": "0x0400020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004 ",
+ "MSRValue": "0x1000020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020004 ",
+ "MSRValue": "0x3F80020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0004 ",
+ "MSRValue": "0x00803C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0004 ",
+ "MSRValue": "0x01003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0004 ",
+ "MSRValue": "0x02003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0004 ",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0004 ",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004 ",
+ "MSRValue": "0x3F803C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010008 ",
+ "MSRValue": "0x0000010008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that have any response type.",
+ "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020008 ",
+ "MSRValue": "0x0080020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020008 ",
+ "MSRValue": "0x0100020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020008 ",
+ "MSRValue": "0x0200020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020008 ",
+ "MSRValue": "0x0400020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020008 ",
+ "MSRValue": "0x1000020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020008 ",
+ "MSRValue": "0x3F80020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0008 ",
+ "MSRValue": "0x00803C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0008 ",
+ "MSRValue": "0x01003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0008 ",
+ "MSRValue": "0x02003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0008 ",
+ "MSRValue": "0x04003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0008 ",
+ "MSRValue": "0x10003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0008 ",
+ "MSRValue": "0x3F803C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010010 ",
+ "MSRValue": "0x0000010010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020010 ",
+ "MSRValue": "0x0080020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020010 ",
+ "MSRValue": "0x0100020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020010 ",
+ "MSRValue": "0x0200020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020010 ",
+ "MSRValue": "0x0400020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020010 ",
+ "MSRValue": "0x1000020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020010 ",
+ "MSRValue": "0x3F80020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0010 ",
+ "MSRValue": "0x00803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0010 ",
+ "MSRValue": "0x01003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0010 ",
+ "MSRValue": "0x02003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0010 ",
+ "MSRValue": "0x04003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010 ",
+ "MSRValue": "0x10003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010 ",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010020 ",
+ "MSRValue": "0x0000010020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020020 ",
+ "MSRValue": "0x0080020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020020 ",
+ "MSRValue": "0x0100020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020020 ",
+ "MSRValue": "0x0200020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020020 ",
+ "MSRValue": "0x0400020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020020 ",
+ "MSRValue": "0x1000020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020020 ",
+ "MSRValue": "0x3F80020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0020 ",
+ "MSRValue": "0x00803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0020 ",
+ "MSRValue": "0x01003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0020 ",
+ "MSRValue": "0x02003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0020 ",
+ "MSRValue": "0x04003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0020 ",
+ "MSRValue": "0x10003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0020 ",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010040 ",
+ "MSRValue": "0x0000010040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020040 ",
+ "MSRValue": "0x0080020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020040 ",
+ "MSRValue": "0x0100020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020040 ",
+ "MSRValue": "0x0200020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020040 ",
+ "MSRValue": "0x0400020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020040 ",
+ "MSRValue": "0x1000020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020040 ",
+ "MSRValue": "0x3F80020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0040 ",
+ "MSRValue": "0x00803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0040 ",
+ "MSRValue": "0x01003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0040 ",
+ "MSRValue": "0x02003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0040 ",
+ "MSRValue": "0x04003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0040 ",
+ "MSRValue": "0x10003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040 ",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080 ",
+ "MSRValue": "0x0000010080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080 ",
+ "MSRValue": "0x0080020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080 ",
+ "MSRValue": "0x0100020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080 ",
+ "MSRValue": "0x0200020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080 ",
+ "MSRValue": "0x0400020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080 ",
+ "MSRValue": "0x1000020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020080 ",
+ "MSRValue": "0x3F80020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0080 ",
+ "MSRValue": "0x00803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0080 ",
+ "MSRValue": "0x01003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0080 ",
+ "MSRValue": "0x02003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0080 ",
+ "MSRValue": "0x04003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080 ",
+ "MSRValue": "0x10003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080 ",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100 ",
+ "MSRValue": "0x0000010100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100 ",
+ "MSRValue": "0x0080020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100 ",
+ "MSRValue": "0x0100020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100 ",
+ "MSRValue": "0x0200020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100 ",
+ "MSRValue": "0x0400020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100 ",
+ "MSRValue": "0x1000020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020100 ",
+ "MSRValue": "0x3F80020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0100 ",
+ "MSRValue": "0x00803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0100 ",
+ "MSRValue": "0x01003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0100 ",
+ "MSRValue": "0x02003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0100 ",
+ "MSRValue": "0x04003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0100 ",
+ "MSRValue": "0x10003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0100 ",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010200 ",
+ "MSRValue": "0x0000010200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020200 ",
+ "MSRValue": "0x0080020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020200 ",
+ "MSRValue": "0x0100020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020200 ",
+ "MSRValue": "0x0200020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020200 ",
+ "MSRValue": "0x0400020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020200 ",
+ "MSRValue": "0x1000020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020200 ",
+ "MSRValue": "0x3F80020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0200 ",
+ "MSRValue": "0x00803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0200 ",
+ "MSRValue": "0x01003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0200 ",
+ "MSRValue": "0x02003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0200 ",
+ "MSRValue": "0x04003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0200 ",
+ "MSRValue": "0x10003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200 ",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that have any response type.",
+ "BriefDescription": "Counts any other requests have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000 ",
+ "MSRValue": "0x0080028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000 ",
+ "MSRValue": "0x0100028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000 ",
+ "MSRValue": "0x0200028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000 ",
+ "MSRValue": "0x0400028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000 ",
+ "MSRValue": "0x1000028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80028000 ",
+ "MSRValue": "0x3F80028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c8000 ",
+ "MSRValue": "0x00803C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c8000 ",
+ "MSRValue": "0x01003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c8000 ",
+ "MSRValue": "0x02003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c8000 ",
+ "MSRValue": "0x04003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c8000 ",
+ "MSRValue": "0x10003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c8000 ",
+ "MSRValue": "0x3F803C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010090 ",
+ "MSRValue": "0x0000010090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that have any response type.",
+ "BriefDescription": "Counts all prefetch data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020090 ",
+ "MSRValue": "0x0080020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020090 ",
+ "MSRValue": "0x0100020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020090 ",
+ "MSRValue": "0x0200020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020090 ",
+ "MSRValue": "0x0400020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020090 ",
+ "MSRValue": "0x1000020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020090 ",
+ "MSRValue": "0x3F80020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0090 ",
+ "MSRValue": "0x00803C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0090 ",
+ "MSRValue": "0x01003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0090 ",
+ "MSRValue": "0x02003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0090 ",
+ "MSRValue": "0x04003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090 ",
+ "MSRValue": "0x10003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090 ",
+ "MSRValue": "0x3F803C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010120 ",
+ "MSRValue": "0x0000010120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that have any response type.",
+ "BriefDescription": "Counts prefetch RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020120 ",
+ "MSRValue": "0x0080020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020120 ",
+ "MSRValue": "0x0100020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020120 ",
+ "MSRValue": "0x0200020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020120 ",
+ "MSRValue": "0x0400020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020120 ",
+ "MSRValue": "0x1000020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020120 ",
+ "MSRValue": "0x3F80020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0120 ",
+ "MSRValue": "0x00803C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0120 ",
+ "MSRValue": "0x01003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0120 ",
+ "MSRValue": "0x02003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0120 ",
+ "MSRValue": "0x04003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0120 ",
+ "MSRValue": "0x10003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0120 ",
+ "MSRValue": "0x3F803C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010240 ",
+ "MSRValue": "0x0000010240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that have any response type.",
+ "BriefDescription": "Counts all prefetch code reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020240 ",
+ "MSRValue": "0x0080020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020240 ",
+ "MSRValue": "0x0100020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020240 ",
+ "MSRValue": "0x0200020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020240 ",
+ "MSRValue": "0x0400020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020240 ",
+ "MSRValue": "0x1000020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020240 ",
+ "MSRValue": "0x3F80020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0240 ",
+ "MSRValue": "0x00803C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0240 ",
+ "MSRValue": "0x01003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0240 ",
+ "MSRValue": "0x02003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0240 ",
+ "MSRValue": "0x04003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0240 ",
+ "MSRValue": "0x10003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0240 ",
+ "MSRValue": "0x3F803C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010091 ",
+ "MSRValue": "0x0000010091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
+ "BriefDescription": "Counts all demand & prefetch data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020091 ",
+ "MSRValue": "0x0080020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020091 ",
+ "MSRValue": "0x0100020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020091 ",
+ "MSRValue": "0x0200020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020091 ",
+ "MSRValue": "0x0400020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020091 ",
+ "MSRValue": "0x1000020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020091 ",
+ "MSRValue": "0x3F80020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0091 ",
+ "MSRValue": "0x00803C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0091 ",
+ "MSRValue": "0x01003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0091 ",
+ "MSRValue": "0x02003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0091 ",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091 ",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0091 ",
+ "MSRValue": "0x3F803C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010122 ",
+ "MSRValue": "0x0000010122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
+ "BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020122 ",
+ "MSRValue": "0x0080020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020122 ",
+ "MSRValue": "0x0100020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020122 ",
+ "MSRValue": "0x0200020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020122 ",
+ "MSRValue": "0x0400020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020122 ",
+ "MSRValue": "0x1000020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f80020122 ",
+ "MSRValue": "0x3F80020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803c0122 ",
+ "MSRValue": "0x00803C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003c0122 ",
+ "MSRValue": "0x01003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003c0122 ",
+ "MSRValue": "0x02003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0122 ",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0122 ",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0122 ",
+ "MSRValue": "0x3F803C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
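
Note: the descriptions removed above all repeat that an offcore response event is only usable when the event select (0xB7/0xBB) and the matching response MSR (0x1A6/0x1A7) are programmed together. As a minimal sketch of what that pairing looks like from user space, not part of this patch, the raw event plus MSR value can be handed to perf_event_open() through config/config1 (the x86 perf PMU maps its offcore_rsp format attribute onto config1). The MSRValue 0x3F803C0091 below is taken from the OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP entry above; the program itself is hypothetical and assumes a Broadwell CPU.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            /* event select 0xB7, umask 0x1: OFFCORE_RESPONSE event */
            attr.config = 0x01B7;
            /* response MSR (0x1A6) value: ALL_DATA_RD.L3_HIT.ANY_SNOOP */
            attr.config1 = 0x3F803C0091ULL;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            int fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the workload being measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            uint64_t count;
            read(fd, &count, sizeof(count));
            printf("offcore responses: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }
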
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 689d478dae93..15291239c128 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,24 +1,26 @@
[
{
- "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "BDM30",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "BDM30",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -45,7 +47,7 @@
"UMask": "0x3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -54,7 +56,7 @@
"UMask": "0x4",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -63,7 +65,7 @@
"UMask": "0x8",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -72,7 +74,7 @@
"UMask": "0x10",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -81,7 +83,7 @@
"UMask": "0x15",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -90,7 +92,7 @@
"UMask": "0x20",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -99,7 +101,7 @@
"UMask": "0x2a",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -108,57 +110,62 @@
"UMask": "0x3c",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
+ "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
+ "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "BriefDescription": "SSE* FP micro-code assist when output value is invalid. (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts any input SSE* floating-point (FP) assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
+ "BriefDescription": "Any input SSE* FP Assist - (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "PEBS": "1",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1. Uses PEBS.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
"EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "BriefDescription": "Counts any FP_ASSIST umask was incrementing (Precise Event)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
}
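
Aside (not part of this patch): the per-count weights stated in the FP_ARITH_INST_RETIRED descriptions above (2 computations per 128-bit packed double count, 4 per 128-bit packed single and 256-bit packed double count, 8 per 256-bit packed single count, scalar counts taken as one computation each) combine into the usual floating-point operation estimate. A sketch, using the event names from this file:

    FLOPs ~= 1 * FP_ARITH_INST_RETIRED.SCALAR
           + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE
           + 4 * FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE
           + 4 * FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE
           + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE

FM(N)ADD/SUB and DPP operations are already counted twice by the hardware, as the descriptions note, so no extra factor is needed for them.
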
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index 7142c76d7f11..aa4a5d762f21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -211,7 +211,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -274,7 +274,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index c9154cebbdf0..b6b5247d3d5a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -311,7 +311,7 @@
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above four.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
"EventCode": "0xCD",
"MSRValue": "0x4",
"Counter": "3",
@@ -320,13 +320,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4",
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above eight.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
"EventCode": "0xCD",
"MSRValue": "0x8",
"Counter": "3",
@@ -335,13 +335,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 16.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
"EventCode": "0xCD",
"MSRValue": "0x10",
"Counter": "3",
@@ -350,13 +350,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16",
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 32.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
"EventCode": "0xCD",
"MSRValue": "0x20",
"Counter": "3",
@@ -365,13 +365,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32",
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 64.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
"EventCode": "0xCD",
"MSRValue": "0x40",
"Counter": "3",
@@ -380,13 +380,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64",
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 128.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
"EventCode": "0xCD",
"MSRValue": "0x80",
"Counter": "3",
@@ -395,13 +395,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128",
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 256.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
"EventCode": "0xCD",
"MSRValue": "0x100",
"Counter": "3",
@@ -410,13 +410,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256",
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
"PEBS": "2",
- "PublicDescription": "This event counts loads with latency value being above 512.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
"EventCode": "0xCD",
"MSRValue": "0x200",
"Counter": "3",
@@ -425,2620 +425,2620 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001 ",
+ "MSRValue": "0x2000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0001 ",
+ "MSRValue": "0x20003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001 ",
+ "MSRValue": "0x0084000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001 ",
+ "MSRValue": "0x0104000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001 ",
+ "MSRValue": "0x0204000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001 ",
+ "MSRValue": "0x0404000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001 ",
+ "MSRValue": "0x1004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001 ",
+ "MSRValue": "0x2004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000001 ",
+ "MSRValue": "0x3F84000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000001 ",
+ "MSRValue": "0x00BC000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000001 ",
+ "MSRValue": "0x013C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000001 ",
+ "MSRValue": "0x023C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000001 ",
+ "MSRValue": "0x043C000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0002 ",
+ "MSRValue": "0x20003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000002 ",
+ "MSRValue": "0x3F84000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000002 ",
+ "MSRValue": "0x00BC000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000002 ",
+ "MSRValue": "0x013C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000002 ",
+ "MSRValue": "0x023C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000002 ",
+ "MSRValue": "0x043C000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004 ",
+ "MSRValue": "0x2000020004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0004 ",
+ "MSRValue": "0x20003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004 ",
+ "MSRValue": "0x0084000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004 ",
+ "MSRValue": "0x0104000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004 ",
+ "MSRValue": "0x0204000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004 ",
+ "MSRValue": "0x0404000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004 ",
+ "MSRValue": "0x1004000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004 ",
+ "MSRValue": "0x2004000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000004 ",
+ "MSRValue": "0x3F84000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000004 ",
+ "MSRValue": "0x00BC000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000004 ",
+ "MSRValue": "0x013C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000004 ",
+ "MSRValue": "0x023C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000004 ",
+ "MSRValue": "0x043C000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020008 ",
+ "MSRValue": "0x2000020008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0008 ",
+ "MSRValue": "0x20003C0008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000008 ",
+ "MSRValue": "0x0084000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000008 ",
+ "MSRValue": "0x0104000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000008 ",
+ "MSRValue": "0x0204000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000008 ",
+ "MSRValue": "0x0404000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000008 ",
+ "MSRValue": "0x1004000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000008 ",
+ "MSRValue": "0x2004000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000008 ",
+ "MSRValue": "0x3F84000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000008 ",
+ "MSRValue": "0x00BC000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000008 ",
+ "MSRValue": "0x013C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000008 ",
+ "MSRValue": "0x023C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000008 ",
+ "MSRValue": "0x043C000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020010 ",
+ "MSRValue": "0x2000020010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0010 ",
+ "MSRValue": "0x20003C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000010 ",
+ "MSRValue": "0x0084000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000010 ",
+ "MSRValue": "0x0104000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000010 ",
+ "MSRValue": "0x0204000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000010 ",
+ "MSRValue": "0x0404000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000010 ",
+ "MSRValue": "0x1004000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000010 ",
+ "MSRValue": "0x2004000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000010 ",
+ "MSRValue": "0x3F84000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000010 ",
+ "MSRValue": "0x00BC000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000010 ",
+ "MSRValue": "0x013C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000010 ",
+ "MSRValue": "0x023C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000010 ",
+ "MSRValue": "0x043C000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020020 ",
+ "MSRValue": "0x2000020020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0020 ",
+ "MSRValue": "0x20003C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000020 ",
+ "MSRValue": "0x0084000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000020 ",
+ "MSRValue": "0x0104000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000020 ",
+ "MSRValue": "0x0204000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000020 ",
+ "MSRValue": "0x0404000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000020 ",
+ "MSRValue": "0x1004000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000020 ",
+ "MSRValue": "0x2004000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000020 ",
+ "MSRValue": "0x3F84000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000020 ",
+ "MSRValue": "0x00BC000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000020 ",
+ "MSRValue": "0x013C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000020 ",
+ "MSRValue": "0x023C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000020 ",
+ "MSRValue": "0x043C000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020040 ",
+ "MSRValue": "0x2000020040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0040 ",
+ "MSRValue": "0x20003C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000040 ",
+ "MSRValue": "0x0084000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000040 ",
+ "MSRValue": "0x0104000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000040 ",
+ "MSRValue": "0x0204000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000040 ",
+ "MSRValue": "0x0404000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000040 ",
+ "MSRValue": "0x1004000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000040 ",
+ "MSRValue": "0x2004000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000040 ",
+ "MSRValue": "0x3F84000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000040 ",
+ "MSRValue": "0x00BC000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000040 ",
+ "MSRValue": "0x013C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000040 ",
+ "MSRValue": "0x023C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000040 ",
+ "MSRValue": "0x043C000040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080 ",
+ "MSRValue": "0x2000020080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0080 ",
+ "MSRValue": "0x20003C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080 ",
+ "MSRValue": "0x0084000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080 ",
+ "MSRValue": "0x0104000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080 ",
+ "MSRValue": "0x0204000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080 ",
+ "MSRValue": "0x0404000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080 ",
+ "MSRValue": "0x1004000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080 ",
+ "MSRValue": "0x2004000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000080 ",
+ "MSRValue": "0x3F84000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000080 ",
+ "MSRValue": "0x00BC000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000080 ",
+ "MSRValue": "0x013C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000080 ",
+ "MSRValue": "0x023C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000080 ",
+ "MSRValue": "0x043C000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100 ",
+ "MSRValue": "0x2000020100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0100 ",
+ "MSRValue": "0x20003C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100 ",
+ "MSRValue": "0x0084000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100 ",
+ "MSRValue": "0x0104000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100 ",
+ "MSRValue": "0x0204000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100 ",
+ "MSRValue": "0x0404000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100 ",
+ "MSRValue": "0x1004000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100 ",
+ "MSRValue": "0x2004000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000100 ",
+ "MSRValue": "0x3F84000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000100 ",
+ "MSRValue": "0x00BC000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000100 ",
+ "MSRValue": "0x013C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000100 ",
+ "MSRValue": "0x023C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000100 ",
+ "MSRValue": "0x043C000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020200 ",
+ "MSRValue": "0x2000020200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0200 ",
+ "MSRValue": "0x20003C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000200 ",
+ "MSRValue": "0x0084000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000200 ",
+ "MSRValue": "0x0104000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000200 ",
+ "MSRValue": "0x0204000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000200 ",
+ "MSRValue": "0x0404000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000200 ",
+ "MSRValue": "0x1004000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000200 ",
+ "MSRValue": "0x2004000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000200 ",
+ "MSRValue": "0x3F84000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000200 ",
+ "MSRValue": "0x00BC000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000200 ",
+ "MSRValue": "0x013C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000200 ",
+ "MSRValue": "0x023C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000200 ",
+ "MSRValue": "0x043C000200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000 ",
+ "MSRValue": "0x2000028000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c8000 ",
+ "MSRValue": "0x20003C8000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000 ",
+ "MSRValue": "0x0084008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000 ",
+ "MSRValue": "0x0104008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000 ",
+ "MSRValue": "0x0204008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000 ",
+ "MSRValue": "0x0404008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000 ",
+ "MSRValue": "0x1004008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000 ",
+ "MSRValue": "0x2004008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84008000 ",
+ "MSRValue": "0x3F84008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc008000 ",
+ "MSRValue": "0x00BC008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c008000 ",
+ "MSRValue": "0x013C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c008000 ",
+ "MSRValue": "0x023C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c008000 ",
+ "MSRValue": "0x043C008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts any other requests",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020090 ",
+ "MSRValue": "0x2000020090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0090 ",
+ "MSRValue": "0x20003C0090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000090 ",
+ "MSRValue": "0x0084000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000090 ",
+ "MSRValue": "0x0104000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000090 ",
+ "MSRValue": "0x0204000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000090 ",
+ "MSRValue": "0x0404000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000090 ",
+ "MSRValue": "0x1004000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000090 ",
+ "MSRValue": "0x2004000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000090 ",
+ "MSRValue": "0x3F84000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000090 ",
+ "MSRValue": "0x00BC000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000090 ",
+ "MSRValue": "0x013C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000090 ",
+ "MSRValue": "0x023C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000090 ",
+ "MSRValue": "0x043C000090",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020120 ",
+ "MSRValue": "0x2000020120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0120 ",
+ "MSRValue": "0x20003C0120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000120 ",
+ "MSRValue": "0x0084000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000120 ",
+ "MSRValue": "0x0104000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000120 ",
+ "MSRValue": "0x0204000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000120 ",
+ "MSRValue": "0x0404000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000120 ",
+ "MSRValue": "0x1004000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000120 ",
+ "MSRValue": "0x2004000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000120 ",
+ "MSRValue": "0x3F84000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000120 ",
+ "MSRValue": "0x00BC000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000120 ",
+ "MSRValue": "0x013C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000120 ",
+ "MSRValue": "0x023C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000120 ",
+ "MSRValue": "0x043C000120",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020240 ",
+ "MSRValue": "0x2000020240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0240 ",
+ "MSRValue": "0x20003C0240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000240 ",
+ "MSRValue": "0x0084000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000240 ",
+ "MSRValue": "0x0104000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000240 ",
+ "MSRValue": "0x0204000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000240 ",
+ "MSRValue": "0x0404000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000240 ",
+ "MSRValue": "0x1004000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000240 ",
+ "MSRValue": "0x2004000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000240 ",
+ "MSRValue": "0x3F84000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000240 ",
+ "MSRValue": "0x00BC000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000240 ",
+ "MSRValue": "0x013C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000240 ",
+ "MSRValue": "0x023C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000240 ",
+ "MSRValue": "0x043C000240",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all prefetch code reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020091 ",
+ "MSRValue": "0x2000020091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0091 ",
+ "MSRValue": "0x20003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000091 ",
+ "MSRValue": "0x0084000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000091 ",
+ "MSRValue": "0x0104000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000091 ",
+ "MSRValue": "0x0204000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000091 ",
+ "MSRValue": "0x0404000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000091 ",
+ "MSRValue": "0x1004000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000091 ",
+ "MSRValue": "0x2004000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000091 ",
+ "MSRValue": "0x3F84000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000091 ",
+ "MSRValue": "0x00BC000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000091 ",
+ "MSRValue": "0x013C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000091 ",
+ "MSRValue": "0x023C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000091 ",
+ "MSRValue": "0x043C000091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020122 ",
+ "MSRValue": "0x2000020122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003c0122 ",
+ "MSRValue": "0x20003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000122 ",
+ "MSRValue": "0x0084000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000122 ",
+ "MSRValue": "0x0104000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000122 ",
+ "MSRValue": "0x0204000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000122 ",
+ "MSRValue": "0x0404000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000122 ",
+ "MSRValue": "0x1004000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000122 ",
+ "MSRValue": "0x2004000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f84000122 ",
+ "MSRValue": "0x3F84000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000122 ",
+ "MSRValue": "0x00BC000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000122 ",
+ "MSRValue": "0x013C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000122 ",
+ "MSRValue": "0x023C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000122 ",
+ "MSRValue": "0x043C000122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
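[Editor's note, not part of the patch] The entries above all program the offcore response facility: event code 0xB7 or 0xBB on a general-purpose counter, with the request/supplier/snoop mask from MSRValue written into MSR 0x1A6 or 0x1A7 (MSRIndex). Below is a minimal Python sketch of how one of these JSON entries could be turned into a perf event specification. The offcore_rsp format term is an assumption about how perf exposes those MSRs on this CPU; check /sys/bus/event_source/devices/cpu/format on the target machine before relying on it.

    # Sketch only: build a perf event string from an offcore-response JSON entry.
    import json

    def to_perf_spec(event):
        # EventCode lists two codes (0xB7, 0xBB); either may be used, each paired
        # with one of the two offcore response MSRs named in MSRIndex.
        code = event["EventCode"].split(",")[0].strip()
        return "cpu/event=%s,umask=%s,offcore_rsp=%s,name=%s/" % (
            code, event["UMask"], event["MSRValue"], event["EventName"])

    with open("tools/perf/pmu-events/arch/x86/broadwell/cache.json") as f:
        events = json.load(f)
    for ev in events:
        if ev.get("EventName", "").startswith("OFFCORE_RESPONSE.ALL_RFO.L3_MISS"):
            print(to_perf_spec(ev))
    # e.g. cpu/event=0xB7,umask=0x1,offcore_rsp=0x00BC000122,name=OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE/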
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 999cf3066363..bb25574b8d21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
},
{
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -31,7 +28,6 @@
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -317,7 +313,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -786,8 +782,8 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "This event counts resource-related stall cycles.",
+ "EventCode": "0xa2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
@@ -973,6 +969,7 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -1147,7 +1144,8 @@
"CounterHTOff": "1"
},
{
- "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1157,12 +1155,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "1",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1178,26 +1176,28 @@
"Data_LA": "1"
},
{
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired (Precise Event)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "PEBS": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to PEBS uops retired event.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
"CounterMask": "10",
"CounterHTOff": "0,1,2,3"
},
@@ -1320,13 +1320,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired. (Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1341,14 +1342,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts far branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
"Errata": "BDW98",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.(Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
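[Editor's note, not part of the patch] The pipeline.json hunk above makes two kinds of edits: it drops the placeholder "EventCode": "0x00" from events that live on fixed counters (fixed counters are not selected by an event code), and it marks several retired-instruction events as precise with "PEBS": "1". A small sketch, assuming the JSON layout shown above, that lists both groups once the patch is applied:

    # Sketch only: sanity-check the two kinds of edits made to pipeline.json.
    import json

    with open("tools/perf/pmu-events/arch/x86/broadwell/pipeline.json") as f:
        events = json.load(f)

    fixed = [e["EventName"] for e in events if "Fixed counter" in e.get("Counter", "")]
    pebs  = [e["EventName"] for e in events if e.get("PEBS") == "1"]

    # After this hunk, the fixed-counter entries carry no EventCode key at all.
    assert all("EventCode" not in e for e in events if "Fixed counter" in e.get("Counter", ""))

    print("fixed-counter events:", fixed)   # e.g. INST_RETIRED.ANY, CPU_CLK_UNHALTED.THREAD, ...
    print("PEBS-capable events:", pebs)     # e.g. UOPS_RETIRED.STALL_CYCLES, BR_INST_RETIRED.NOT_TAKEN, ...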
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 4ad425312bdc..bf243fe2a0ec 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -439,7 +439,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,7 +451,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 0d04bf9db000..e2f0540625a2 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -322,7 +318,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 5a7f1ec24200..c6f9762f32c0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -1,164 +1,370 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
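[Editor's note, not part of the patch] The four Level-1 metrics added above split the issue bandwidth SLOTS = 4 * cycles into Frontend_Bound, Bad_Speculation, Retiring, and Backend_Bound, with Backend_Bound defined as the remainder, so the four fractions always total 1. A worked example with made-up counter values, following the non-SMT MetricExpr formulas verbatim:

    # Sketch only: Level-1 TopDown decomposition; counter values are illustrative.
    cycles                      = 1_000_000
    IDQ_UOPS_NOT_DELIVERED_CORE =   400_000
    UOPS_ISSUED_ANY             = 3_200_000
    UOPS_RETIRED_RETIRE_SLOTS   = 3_000_000
    INT_MISC_RECOVERY_CYCLES    =    25_000

    slots           = 4 * cycles
    frontend_bound  = IDQ_UOPS_NOT_DELIVERED_CORE / slots
    bad_speculation = (UOPS_ISSUED_ANY - UOPS_RETIRED_RETIRE_SLOTS
                       + 4 * INT_MISC_RECOVERY_CYCLES) / slots
    retiring        = UOPS_RETIRED_RETIRE_SLOTS / slots
    backend_bound   = 1 - (frontend_bound + bad_speculation + retiring)

    print(frontend_bound, bad_speculation, retiring, backend_bound)
    # 0.1 0.075 0.75 0.075 -- the four fractions sum to 1.0 by construction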
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
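The metric entries added above are evaluated by perf from raw counter values according to their MetricExpr strings. As a minimal illustration (not part of the patch), the following Python sketch applies a few of those formulas (CPI, IpL, L1MPKI and L1D_Cache_Fill_BW) to hypothetical counter readings; the numbers and the duration_time value are assumptions chosen only to show the arithmetic.

# Illustrative sketch only; counter values are hypothetical.
counters = {
    "INST_RETIRED.ANY": 1_200_000_000,
    "cycles": 1_000_000_000,                 # CPU_CLK_UNHALTED.THREAD
    "MEM_UOPS_RETIRED.ALL_LOADS": 300_000_000,
    "MEM_LOAD_UOPS_RETIRED.L1_MISS": 12_000_000,
    "L1D.REPLACEMENT": 15_000_000,
}
duration_time = 0.5  # seconds of measurement (assumed)

# Formulas mirror the MetricExpr strings above.
cpi = 1 / (counters["INST_RETIRED.ANY"] / counters["cycles"])
ipl = counters["INST_RETIRED.ANY"] / counters["MEM_UOPS_RETIRED.ALL_LOADS"]
l1mpki = 1000 * counters["MEM_LOAD_UOPS_RETIRED.L1_MISS"] / counters["INST_RETIRED.ANY"]
l1d_fill_bw = 64 * counters["L1D.REPLACEMENT"] / 1_000_000_000 / duration_time  # GB/s

print(f"CPI={cpi:.2f}  IpL={ipl:.1f}  L1MPKI={l1mpki:.1f}  L1D_Fill_BW={l1d_fill_bw:.2f} GB/s")

With these made-up values the sketch prints CPI=0.83, IpL=4.0, L1MPKI=10.0 and an L1D fill bandwidth of 1.92 GB/s, matching what perf would report for the corresponding metrics.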
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index 141b1080429d..75a3098d5775 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -57,17 +57,17 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -76,7 +76,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -85,7 +85,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x50",
+ "UMask": "0xd0",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.L2_PF_HIT",
@@ -396,24 +396,24 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -421,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -459,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
+ "BriefDescription": "All retired load uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "All retired store uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -484,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -558,83 +558,84 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -646,7 +647,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -658,7 +659,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -810,12 +811,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that hit in the L3",
- "MSRValue": "0x3f803c8fff",
+ "BriefDescription": "Counts all requests hit in the L3",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -823,12 +824,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -836,12 +837,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -849,12 +850,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0244",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -862,12 +863,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -875,12 +876,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -888,12 +889,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -901,12 +902,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -914,12 +915,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -927,12 +928,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -940,12 +941,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -953,12 +954,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
- "MSRValue": "0x3f803c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index d7b9d9c9c518..ba0e0c4e74eb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -42,7 +42,7 @@
{
"EventCode": "0xC7",
"UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR",
"SampleAfterValue": "2000003",
@@ -51,7 +51,7 @@
{
"EventCode": "0xC7",
"UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -60,7 +60,7 @@
{
"EventCode": "0xC7",
"UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -69,7 +69,7 @@
{
"EventCode": "0xC7",
"UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -78,7 +78,7 @@
{
"EventCode": "0xC7",
"UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
"SampleAfterValue": "2000006",
@@ -87,7 +87,7 @@
{
"EventCode": "0xc7",
"UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -96,7 +96,7 @@
{
"EventCode": "0xC7",
"UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
@@ -105,7 +105,7 @@
{
"EventCode": "0xC7",
"UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.PACKED",
"SampleAfterValue": "2000004",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index d79a5cfea44b..ecb413bb67ca 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -170,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
+ "BriefDescription": "Number of times HLE abort was triggered",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -251,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
+ "BriefDescription": "Number of times RTM abort was triggered",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
+ "PublicDescription": "Number of times RTM abort was triggered .",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -312,14 +312,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 4",
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
"PEBS": "2",
"MSRValue": "0x4",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above four.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
"TakenAlone": "1",
"SampleAfterValue": "100003",
"CounterHTOff": "3"
@@ -327,14 +327,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 8",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
"PEBS": "2",
"MSRValue": "0x8",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above eight.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
"TakenAlone": "1",
"SampleAfterValue": "50021",
"CounterHTOff": "3"
@@ -342,14 +342,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 16",
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
"PEBS": "2",
"MSRValue": "0x10",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 16.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
"TakenAlone": "1",
"SampleAfterValue": "20011",
"CounterHTOff": "3"
@@ -357,14 +357,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 32",
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
"PEBS": "2",
"MSRValue": "0x20",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 32.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "3"
@@ -372,14 +372,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 64",
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
"PEBS": "2",
"MSRValue": "0x40",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 64.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
"TakenAlone": "1",
"SampleAfterValue": "2003",
"CounterHTOff": "3"
@@ -387,14 +387,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 128",
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
"PEBS": "2",
"MSRValue": "0x80",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 128.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
"TakenAlone": "1",
"SampleAfterValue": "1009",
"CounterHTOff": "3"
@@ -402,14 +402,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 256",
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
"PEBS": "2",
"MSRValue": "0x100",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 256.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
"TakenAlone": "1",
"SampleAfterValue": "503",
"CounterHTOff": "3"
@@ -417,14 +417,14 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 512",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
"PEBS": "2",
"MSRValue": "0x200",
"Counter": "3",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 512.",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
"TakenAlone": "1",
"SampleAfterValue": "101",
"CounterHTOff": "3"
@@ -433,12 +433,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that miss in the L3",
- "MSRValue": "0x3fbfc08fff",
+ "BriefDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC08FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -446,12 +446,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -459,12 +459,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -472,12 +472,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063bc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063BC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -485,12 +485,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06040007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06040007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -498,12 +498,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
- "MSRValue": "0x3fbfc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -511,12 +511,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -524,12 +524,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
- "MSRValue": "0x3fbfc00244",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -537,12 +537,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -550,12 +550,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00122",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -563,12 +563,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -576,12 +576,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -589,12 +589,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063bc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063BC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -602,12 +602,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0604000091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -615,12 +615,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
- "MSRValue": "0x3fbfc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x3FBFC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -628,12 +628,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -641,12 +641,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -654,12 +654,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -667,12 +667,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
- "MSRValue": "0x3fbfc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
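The OFFCORE_RESPONSE.* entries above are the ones whose PublicDescription this patch trims; the dropped sentence explained that these events are programmed through the pair of off-core response MSRs (0x1a6/0x1a7), with the MSRValue request/response mask selecting what is counted. Purely as a hedged illustration, the sketch below expands such an entry into a raw perf event string; perf on Intel PMUs usually maps the mask to an "offcore_rsp" format term, but verify the available format terms before relying on this exact syntax.

```python
# Hedged sketch: expand an OFFCORE_RESPONSE.* entry into a raw perf event
# string. Assumption: the PMU exposes the MSR 0x1a6/0x1a7 mask as the
# "offcore_rsp" format term; check
# /sys/bus/event_source/devices/cpu/format/ on the target system.

entry = {
    "EventCode": "0xB7, 0xBB",   # either event select may be used
    "UMask": "0x1",
    "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
    "MSRValue": "0x3FBFC007F7",  # request/response mask for MSR 0x1a6/0x1a7
}

def to_perf_event(ev):
    event_code = ev["EventCode"].split(",")[0].strip()  # pick the first select
    return "cpu/event={},umask={},offcore_rsp={},name={}/".format(
        event_code.lower(), ev["UMask"].lower(),
        ev["MSRValue"].lower(), ev["EventName"])

print("perf stat -e '{}' -- <workload>".format(to_perf_event(entry)))
```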
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 0d04bf9db000..c2f6932a5817 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -322,7 +318,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -786,12 +782,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xa2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
"Counter": "0,1,2,3",
"EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "PublicDescription": "This event counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1168,12 +1164,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
+ "BriefDescription": "Actually retired uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1204,11 +1200,11 @@
{
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "BriefDescription": "Retirement slots used.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This event counts the number of retirement slots used.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1266,33 +1262,33 @@
{
"EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1311,11 +1307,11 @@
{
"EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Return instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This event counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1332,11 +1328,11 @@
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Taken branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This event counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1364,11 +1360,11 @@
{
"EventCode": "0xC5",
"UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1386,22 +1382,22 @@
{
"EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This event counts mispredicted return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
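Besides the wording changes, the pipeline.json hunks above drop the placeholder "EventCode": "0x00" field from events that live on fixed counters (INST_RETIRED.ANY, CPU_CLK_UNHALTED.*), since a fixed counter has no programmable event select. The Python sketch below only illustrates that structural transformation on a made-up entry; it is not the event-list converter actually used to regenerate these files.

```python
# Hedged sketch of the structural change made above: fixed-counter events
# (Counter == "Fixed counter N") no longer carry a meaningless
# "EventCode": "0x00" field. Illustration only, not the real converter.
import json

def drop_fixed_counter_event_code(events):
    for ev in events:
        if ev.get("Counter", "").startswith("Fixed counter"):
            ev.pop("EventCode", None)
    return events

sample = [{
    "EventCode": "0x00",
    "UMask": "0x1",
    "EventName": "INST_RETIRED.ANY",
    "Counter": "Fixed counter 0",
}]
print(json.dumps(drop_fixed_counter_event_code(sample), indent=4))
```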
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 36c903faed0b..1a1a3501180a 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1,164 +1,394 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
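
For context, the DRAM_BW_Use metric added above simply scales the IMC CAS read/write counts to bytes (64 bytes per CAS transaction), converts to GB, and divides by wall-clock time, exactly as its MetricExpr states. A minimal sketch of that arithmetic, outside the patch, with made-up counter values (the function name and the example numbers are illustrative only, not part of the kernel JSON):

    # Illustrative sketch of the DRAM_BW_Use formula:
    #   ( 64 * ( cas_count_read + cas_count_write ) / 1000000000 ) / duration_time
    # Inputs are raw uncore_imc CAS counts and the measurement interval in seconds.
    def dram_bw_use(cas_count_read, cas_count_write, duration_time_s):
        """Return average DRAM bandwidth in GB/s (64 bytes moved per CAS)."""
        bytes_transferred = 64 * (cas_count_read + cas_count_write)
        return (bytes_transferred / 1e9) / duration_time_s

    # Hypothetical example: 2e9 reads and 1e9 writes over a 1.5 s window.
    print(dram_bw_use(2e9, 1e9, 1.5))  # prints 128.0, i.e. ~128 GB/s
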
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index f8bbe087b0f8..52a105666afc 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -77,7 +77,8 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -88,7 +89,8 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -99,7 +101,8 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -110,7 +113,8 @@
"UMask": "0x43",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -121,7 +125,8 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -132,7 +137,8 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -143,7 +149,8 @@
"UMask": "0x83",
"EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -154,7 +161,8 @@
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -165,7 +173,8 @@
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -176,7 +185,8 @@
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -187,7 +197,8 @@
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -198,7 +209,8 @@
"UMask": "0x20",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -209,7 +221,8 @@
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -220,26 +233,14 @@
"UMask": "0x80",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x40000032b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x36000032b7 ",
+ "MSRValue": "0x36000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
@@ -252,7 +253,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x10000032b7 ",
+ "MSRValue": "0x10000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
@@ -265,7 +266,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x04000032b7 ",
+ "MSRValue": "0x04000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -278,20 +279,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x02000032b7 ",
+ "MSRValue": "0x02000032b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x00000432b7 ",
+ "MSRValue": "0x00000432b7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
@@ -302,35 +303,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x00000132b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000022 ",
+ "MSRValue": "0x3600000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
@@ -343,7 +318,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000022 ",
+ "MSRValue": "0x1000000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -356,7 +331,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000022 ",
+ "MSRValue": "0x0400000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -369,20 +344,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000022 ",
+ "MSRValue": "0x0200000022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040022 ",
+ "MSRValue": "0x0000040022",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
@@ -393,32 +368,6 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003091",
@@ -466,7 +415,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -484,35 +433,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000013091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000003010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600003010 ",
+ "MSRValue": "0x3600003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
@@ -525,7 +448,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000003010 ",
+ "MSRValue": "0x1000003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -538,7 +461,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400003010 ",
+ "MSRValue": "0x0400003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -551,20 +474,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200003010 ",
+ "MSRValue": "0x0200003010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000043010 ",
+ "MSRValue": "0x0000043010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
@@ -575,48 +498,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000013010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000008000 ",
+ "MSRValue": "0x1000008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
@@ -629,7 +513,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400008000 ",
+ "MSRValue": "0x0400008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -642,20 +526,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200008000 ",
+ "MSRValue": "0x0200008000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000048000 ",
+ "MSRValue": "0x0000048000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
@@ -668,7 +552,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
@@ -679,22 +563,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600004800 ",
+ "MSRValue": "0x3600004800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
@@ -705,48 +576,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000044800 ",
+ "MSRValue": "0x0000044800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
@@ -757,35 +589,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000014800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000004000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600004000 ",
+ "MSRValue": "0x3600004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
@@ -798,7 +604,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000004000 ",
+ "MSRValue": "0x1000004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
@@ -811,7 +617,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400004000 ",
+ "MSRValue": "0x0400004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -824,20 +630,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200004000 ",
+ "MSRValue": "0x0200004000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000044000 ",
+ "MSRValue": "0x0000044000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
@@ -848,35 +654,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000014000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000002000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600002000 ",
+ "MSRValue": "0x3600002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
@@ -889,7 +669,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000002000 ",
+ "MSRValue": "0x1000002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -902,7 +682,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400002000 ",
+ "MSRValue": "0x0400002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -915,20 +695,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200002000 ",
+ "MSRValue": "0x0200002000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000042000 ",
+ "MSRValue": "0x0000042000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
@@ -939,35 +719,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000012000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000001000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600001000 ",
+ "MSRValue": "0x3600001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
@@ -980,7 +734,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000001000 ",
+ "MSRValue": "0x1000001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
@@ -993,7 +747,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400001000 ",
+ "MSRValue": "0x0400001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1006,20 +760,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200001000 ",
+ "MSRValue": "0x0200001000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000041000 ",
+ "MSRValue": "0x0000041000",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
@@ -1030,35 +784,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000011000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000800 ",
+ "MSRValue": "0x3600000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
@@ -1071,7 +799,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000800 ",
+ "MSRValue": "0x1000000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
@@ -1084,7 +812,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000800 ",
+ "MSRValue": "0x0400000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1097,20 +825,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000800 ",
+ "MSRValue": "0x0200000800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040800 ",
+ "MSRValue": "0x0000040800",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
@@ -1121,100 +849,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000010400 ",
+ "MSRValue": "0x0000010400",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
@@ -1225,113 +862,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x3600000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000100 ",
+ "MSRValue": "0x3600000100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
@@ -1342,87 +875,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000080 ",
+ "MSRValue": "0x3600000080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
@@ -1433,87 +888,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0400000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0200000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000020 ",
+ "MSRValue": "0x3600000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
@@ -1526,7 +903,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000020 ",
+ "MSRValue": "0x1000000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -1539,7 +916,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000020 ",
+ "MSRValue": "0x0400000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1552,20 +929,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000020 ",
+ "MSRValue": "0x0200000020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040020 ",
+ "MSRValue": "0x0000040020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
@@ -1576,35 +953,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000010 ",
+ "MSRValue": "0x3600000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
@@ -1617,7 +968,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000010 ",
+ "MSRValue": "0x1000000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -1630,7 +981,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000010 ",
+ "MSRValue": "0x0400000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1643,20 +994,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000010 ",
+ "MSRValue": "0x0200000010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040010 ",
+ "MSRValue": "0x0000040010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
@@ -1667,35 +1018,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x4000000008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000008 ",
+ "MSRValue": "0x3600000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
@@ -1708,7 +1033,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000008 ",
+ "MSRValue": "0x1000000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
@@ -1721,7 +1046,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000008 ",
+ "MSRValue": "0x0400000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1734,20 +1059,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000008 ",
+ "MSRValue": "0x0200000008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040008 ",
+ "MSRValue": "0x0000040008",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
@@ -1758,22 +1083,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004 ",
+ "MSRValue": "0x4000000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
@@ -1786,7 +1098,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000004 ",
+ "MSRValue": "0x3600000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
@@ -1797,22 +1109,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x1000000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000004 ",
+ "MSRValue": "0x0400000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1825,20 +1124,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000004 ",
+ "MSRValue": "0x0200000004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040004 ",
+ "MSRValue": "0x0000040004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
@@ -1849,22 +1148,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002 ",
+ "MSRValue": "0x4000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
@@ -1877,7 +1163,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000002 ",
+ "MSRValue": "0x3600000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
@@ -1890,7 +1176,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000002 ",
+ "MSRValue": "0x1000000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
@@ -1903,7 +1189,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000002 ",
+ "MSRValue": "0x0400000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -1916,20 +1202,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000002 ",
+ "MSRValue": "0x0200000002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040002 ",
+ "MSRValue": "0x0000040002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
@@ -1940,22 +1226,9 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001 ",
+ "MSRValue": "0x4000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
@@ -1968,7 +1241,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x3600000001 ",
+ "MSRValue": "0x3600000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
@@ -1981,7 +1254,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x1000000001 ",
+ "MSRValue": "0x1000000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
@@ -1994,7 +1267,7 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0400000001 ",
+ "MSRValue": "0x0400000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
@@ -2007,20 +1280,20 @@
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0200000001 ",
+ "MSRValue": "0x0200000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
- "MSRValue": "0x0000040001 ",
+ "MSRValue": "0x0000040001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
@@ -2028,18 +1301,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x0000010001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index 690cebd12a94..197dc76d49dd 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -30,265 +30,5 @@
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "200003",
"BriefDescription": "Machine clears due to memory ordering issue"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x20000032b7 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000022 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000003010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000004800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000004000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000002000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000001000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000400 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000200 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000020 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000010 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000008 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
- "EventCode": "0xB7",
- "MSRValue": "0x2000000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.",
- "Offcore": "1"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 254788af8ab6..6342368accf8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -10,7 +9,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.CORE",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -188,7 +185,7 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 9805198d3f5f..343d66bbd777 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -48,7 +48,8 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -59,7 +60,8 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -70,6 +72,7 @@
"UMask": "0x13",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
index b4791b443a66..5a6ac8285ad4 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -92,7 +92,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -104,7 +105,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -116,7 +118,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -128,7 +131,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -140,7 +144,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -152,7 +157,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -164,7 +170,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -176,7 +183,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -188,7 +196,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -200,7 +209,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -212,7 +222,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -224,7 +235,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -236,7 +248,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -248,7 +261,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1"
},
{
"CollectPEBSRecord": "1",
@@ -292,7 +306,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -367,7 +381,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -442,7 +456,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -517,7 +531,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -592,7 +606,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -667,7 +681,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -742,7 +756,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -817,7 +831,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -892,7 +906,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -967,7 +981,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1042,7 +1056,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1117,7 +1131,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1192,7 +1206,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1267,7 +1281,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1342,7 +1356,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
@@ -1417,7 +1431,7 @@
"PDIR_COUNTER": "na",
"MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"Offcore": "1"
},
{
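Besides propagating the same Data_LA flag to the goldmontplus load/store events, the cache hunks above only strip a stray trailing space from the offcore-response BriefDescription strings. A throwaway check in the same spirit, assuming nothing beyond the JSON layout shown in the hunks (the script and its command-line usage are illustrative, not part of the kernel tree):

    # Hypothetical check: report description fields that still end in whitespace.
    import json
    import sys

    def trailing_space_fields(path):
        with open(path) as f:
            events = json.load(f)
        for ev in events:
            for key in ("BriefDescription", "PublicDescription"):
                value = ev.get(key)
                if value is not None and value != value.rstrip():
                    yield ev.get("EventName", "<unnamed>"), key

    if __name__ == "__main__":
        for name, key in trailing_space_fields(sys.argv[1]):
            print(f"{name}: {key} has trailing whitespace")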
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index ccf1aed69197..e3fa1a0ba71b 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -3,7 +3,6 @@
"PEBS": "2",
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"PEBScounters": "32",
@@ -15,7 +14,6 @@
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"PEBScounters": "33",
@@ -27,7 +25,6 @@
{
"CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"PEBScounters": "34",
@@ -231,7 +228,7 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
index 0b53a3b0dfb8..0d32fd26ded1 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -189,7 +189,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -201,7 +202,8 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
},
{
"PEBS": "2",
@@ -213,6 +215,7 @@
"PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1"
}
]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index da4d6ddd4f92..7fb0ad8d8ca1 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -63,10 +63,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"Errata": "HSD78",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
@@ -77,7 +77,7 @@
"PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache",
@@ -87,7 +87,7 @@
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -97,7 +97,7 @@
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x50",
+ "UMask": "0xd0",
"EventName": "L2_RQSTS.L2_PF_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
@@ -610,7 +610,7 @@
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -623,7 +623,7 @@
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -792,7 +792,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "",
"EventCode": "0xf4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -802,262 +801,262 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c8fff",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests that hit in the L3",
+ "BriefDescription": "Counts all requests hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c07f7",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c07f7",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0244",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0122",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0122",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0091",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0100",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0020",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0004",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0004",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0002",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003c0001",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
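Two different kinds of change are mixed in the haswell cache hunks above: the UMask updates (for example 0x41 becoming 0xc1 for L2_RQSTS.DEMAND_DATA_RD_HIT) change the value actually programmed into the counter, whereas the MSRValue rewrites only switch the hexadecimal digits to upper case. A quick standalone check of the latter claim, using value pairs copied from the hunks above (the snippet is illustrative and not part of the patch):

    # Confirm the MSRValue rewrites are case-only, i.e. old and new strings
    # encode the same integer.
    pairs = [
        ("0x3f803c8fff", "0x3F803C8FFF"),
        ("0x10003c07f7", "0x10003C07F7"),
        ("0x04003c0001", "0x04003C0001"),
    ]

    for old, new in pairs:
        assert int(old, 16) == int(new, 16), (old, new)
    print("all checked MSRValue changes are case-only")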
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index f9843e5a9b42..f5a3beaa19fc 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -1,22 +1,26 @@
[
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "HSD56, HSM57",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "HSD56, HSM57",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -30,53 +34,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of X87 FP assists due to output values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
+ "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of X87 FP assists due to input values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
+ "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "BriefDescription": "SSE* FP micro-code assist when output value is invalid.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
+ "BriefDescription": "Any input SSE* FP Assist",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
"EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "BriefDescription": "Counts any FP_ASSIST umask was incrementing",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index 5ab5c78fe580..21b27488b621 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -1,158 +1,322 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
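The metric entries in the file above pair a MetricName with a MetricExpr that perf evaluates from raw event counts. As a rough illustration only (made-up counter values, not perf's own metric engine), two of the simpler expressions reduce to plain arithmetic:

# Illustrative Python sketch; counter values are invented, and the real
# evaluation is done by perf's metric code, not by this snippet.
counts = {
    "INST_RETIRED.ANY": 1_200_000_000,
    "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,   # the "cycles"/CLKS event
}
clks = counts["CPU_CLK_UNHALTED.THREAD"]             # MetricName: CLKS
slots = 4 * clks                                     # MetricName: SLOTS (non-SMT form)
core_ipc = counts["INST_RETIRED.ANY"] / clks         # MetricName: CoreIPC
print(f"CLKS={clks} SLOTS={slots} CoreIPC={core_ipc:.2f}")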
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index e5f9fa6655b3..ef13ed88e2ea 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -298,7 +298,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Randomly selected loads with latency value being above 4.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -312,7 +312,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8.",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -326,7 +326,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -340,7 +340,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 32.",
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -354,7 +354,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Randomly selected loads with latency value being above 64.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -368,7 +368,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -382,7 +382,7 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
@@ -396,280 +396,280 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
"TakenAlone": "1",
"CounterHTOff": "3"
},
{
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc08fff",
+ "MSRValue": "0x3FFFC08FFF",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests that miss in the L3",
+ "BriefDescription": "Counts all requests miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01004007f7",
+ "MSRValue": "0x01004007F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc007f7",
+ "MSRValue": "0x3FFFC007F7",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+ "BriefDescription": "miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00244",
+ "MSRValue": "0x3FFFC00244",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00122",
+ "MSRValue": "0x3FFFC00122",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00091",
+ "MSRValue": "0x3FFFC00091",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00200",
+ "MSRValue": "0x3FFFC00200",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00100",
+ "MSRValue": "0x3FFFC00100",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00080",
+ "MSRValue": "0x3FFFC00080",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00040",
+ "MSRValue": "0x3FFFC00040",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00020",
+ "MSRValue": "0x3FFFC00020",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00010",
+ "MSRValue": "0x3FFFC00010",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00004",
+ "MSRValue": "0x3FFFC00004",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss in the L3",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00002",
+ "MSRValue": "0x3FFFC00002",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00001",
+ "MSRValue": "0x3FFFC00001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss in the L3",
+ "BriefDescription": "Counts demand data reads miss in the L3",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
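Each OFFCORE_RESPONSE entry above programs one of the two offcore-response MSRs (MSRIndex 0x1a6/0x1a7) with the listed MSRValue. As a hedged sketch (the field split below is an assumption about the MSR layout; the values themselves are copied from entries in this file), the encodings can be decomposed into a low request-type mask and an upper response/supplier mask:

# Illustrative only; field layout is an assumption, values come from this file.
events = {
    "DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE": 0x3FFFC00001,
    "ALL_READS.L3_MISS.ANY_RESPONSE":      0x3FFFC007F7,
    "DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM":   0x0100400001,
}
for name, msr_value in events.items():
    request_bits  = msr_value & 0xFFFF   # which transaction types are matched
    response_bits = msr_value >> 16      # which responses/suppliers are matched
    print(f"{name}: request=0x{request_bits:04x} response=0x{response_bits:x}")

The grouping is visible directly in the data: the *.L3_MISS.ANY_RESPONSE variants share the same upper bits (0x3FFFC0...) and differ only in the low request-type bits, while the *.L3_MISS.LOCAL_DRAM variants share 0x010040... instead.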
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index a4dcfce4a512..734d3873729e 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"Errata": "HSD140, HSD143",
@@ -12,7 +11,6 @@
},
{
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -21,7 +19,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -32,7 +29,6 @@
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -1071,7 +1067,8 @@
"CounterHTOff": "1"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"EventCode": "0xC0",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1081,13 +1078,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1102,28 +1099,34 @@
"Data_LA": "1"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
"CounterMask": "10",
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -1131,7 +1134,7 @@
"AnyThread": "1",
"EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "BriefDescription": "Cycles no executable uops retired on core",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -1245,13 +1248,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1265,13 +1269,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of far branches retired.",
+ "PEBS": "1",
+ "PublicDescription": "",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100003",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index b2fbd617306a..a9e62d4357af 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -64,18 +64,18 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"Errata": "HSD78",
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -85,7 +85,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -95,7 +95,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x50",
+ "UMask": "0xd0",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.L2_PF_HIT",
@@ -416,7 +416,7 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -428,7 +428,7 @@
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -441,7 +441,7 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access. (precise Event)",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -453,34 +453,32 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
- "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"Errata": "HSD29, HSM30",
"L1_Hit_Indication": "1",
- "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops. (precise Event)",
+ "BriefDescription": "All retired load uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -492,14 +490,13 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops. (precise Event)",
+ "BriefDescription": "All retired store uops.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"Errata": "HSD29, HSM30",
"L1_Hit_Indication": "1",
- "PublicDescription": "This event counts all store uops retired. This is a precise event.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -530,13 +527,13 @@
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -549,19 +546,20 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"Errata": "HSM30",
- "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
+ "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -574,6 +572,7 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -604,26 +603,24 @@
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
@@ -642,19 +639,20 @@
{
"EventCode": "0xD3",
"UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -666,7 +664,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -678,7 +676,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -833,7 +831,6 @@
"BriefDescription": "Split locks in SQ",
"Counter": "0,1,2,3",
"EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,12 +838,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0001",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -854,12 +851,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0001",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -867,12 +864,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -880,12 +877,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0002",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -893,12 +890,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0004",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -906,12 +903,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0004",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -919,12 +916,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
- "MSRValue": "0x3f803c0010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -932,12 +929,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -945,12 +942,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0040",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0040",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -958,12 +955,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
- "MSRValue": "0x3f803c0080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -971,12 +968,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
- "MSRValue": "0x3f803c0100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -984,12 +981,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
- "MSRValue": "0x3f803c0200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -997,12 +994,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1010,12 +1007,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0091",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1023,12 +1020,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1036,12 +1033,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c0122",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1049,12 +1046,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c0244",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C0244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1062,12 +1059,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1075,12 +1072,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003c07f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C07F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1088,12 +1085,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that hit in the L3",
- "MSRValue": "0x3f803c8fff",
+ "BriefDescription": "Counts all requests hit in the L3",
+ "MSRValue": "0x3F803C8FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests hit in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
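
Editor's note (not part of the patch): the hunks above consistently do two things per event entry -- the hex digits of MSRValue are uppercased (e.g. "0x3f803c8fff" becomes "0x3F803C8FFF") and the long generic sentence about programming the offcore response MSRs is dropped, leaving PublicDescription equal to the shortened BriefDescription. A minimal Python sketch of that normalization, using a sample entry copied from the hunk above; it is illustrative only and not how the vendor files were actually regenerated.

    # Illustrative only: normalize one event entry the way the hunks above do --
    # uppercase the hex digits of MSRValue (keeping the "0x" prefix and padding)
    # and reuse the shortened BriefDescription as the PublicDescription.
    import json

    def normalize(event: dict) -> dict:
        msr = event.get("MSRValue", "")
        if msr.lower().startswith("0x"):
            # "0x3f803c8fff" -> "0x3F803C8FFF"; entries with only digits are unchanged.
            event["MSRValue"] = "0x" + msr[2:].upper()
        brief = event.get("BriefDescription")
        if brief:
            event["PublicDescription"] = brief
        return event

    sample = {
        "BriefDescription": "Counts all requests hit in the L3",
        "MSRValue": "0x3f803c8fff",
    }
    print(json.dumps(normalize(sample), indent=2))
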
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 5ab5c78fe580..e5aac148c941 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -1,158 +1,340 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
"MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
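
Editor's note (not part of the patch): the new hsx-metrics.json entries above carry the Topdown Level-1 formulas directly in MetricExpr. As a hedged sketch, the Python fragment below evaluates the four non-SMT Level-1 metrics exactly as those expressions define them, from hypothetical counter readings chosen only for illustration. perf evaluates the same expressions itself via the metric option (e.g. perf stat -M TopdownL1); the sketch just makes the arithmetic explicit.

    # Illustrative only: evaluate the non-SMT Topdown Level-1 MetricExpr formulas
    # from the entries above, using hypothetical counter values.
    counters = {
        "CPU_CLK_UNHALTED.THREAD": 1_000_000,    # "cycles" in the expressions
        "IDQ_UOPS_NOT_DELIVERED.CORE": 600_000,
        "UOPS_ISSUED.ANY": 2_600_000,
        "UOPS_RETIRED.RETIRE_SLOTS": 2_400_000,
        "INT_MISC.RECOVERY_CYCLES": 20_000,
    }

    slots = 4 * counters["CPU_CLK_UNHALTED.THREAD"]          # SLOTS = 4 * cycles
    frontend_bound = counters["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
    bad_speculation = (counters["UOPS_ISSUED.ANY"]
                       - counters["UOPS_RETIRED.RETIRE_SLOTS"]
                       + 4 * counters["INT_MISC.RECOVERY_CYCLES"]) / slots
    retiring = counters["UOPS_RETIRED.RETIRE_SLOTS"] / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    for name, value in [("Frontend_Bound", frontend_bound),
                        ("Bad_Speculation", bad_speculation),
                        ("Retiring", retiring),
                        ("Backend_Bound", backend_bound)]:
        print(f"{name}: {value:.2%}")
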
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 56b0f24b8029..a42d5ce86b6f 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -291,7 +291,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Randomly selected loads with latency value being above 4.",
"PEBS": "2",
"MSRValue": "0x4",
"Counter": "3",
@@ -305,7 +305,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 8.",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
"PEBS": "2",
"MSRValue": "0x8",
"Counter": "3",
@@ -319,7 +319,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
"PEBS": "2",
"MSRValue": "0x10",
"Counter": "3",
@@ -333,7 +333,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 32.",
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
"PEBS": "2",
"MSRValue": "0x20",
"Counter": "3",
@@ -347,7 +347,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Randomly selected loads with latency value being above 64.",
"PEBS": "2",
"MSRValue": "0x40",
"Counter": "3",
@@ -361,7 +361,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
"PEBS": "2",
"MSRValue": "0x80",
"Counter": "3",
@@ -375,7 +375,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
"PEBS": "2",
"MSRValue": "0x100",
"Counter": "3",
@@ -389,7 +389,7 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
"PEBS": "2",
"MSRValue": "0x200",
"Counter": "3",
@@ -404,12 +404,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss in the L3",
- "MSRValue": "0x3fbfc00001",
+ "BriefDescription": "Counts demand data reads miss in the L3",
+ "MSRValue": "0x3FBFC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -417,12 +417,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -430,12 +430,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
- "MSRValue": "0x3fbfc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -443,12 +443,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -456,12 +456,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00002",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -469,12 +469,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss in the L3",
- "MSRValue": "0x3fbfc00004",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
+ "MSRValue": "0x3FBFC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -482,12 +482,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -495,12 +495,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
- "MSRValue": "0x3fbfc00010",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "MSRValue": "0x3FBFC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -508,12 +508,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00020",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -521,12 +521,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00040",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00040",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -534,12 +534,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
- "MSRValue": "0x3fbfc00080",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "MSRValue": "0x3FBFC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -547,12 +547,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00100",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -560,12 +560,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
- "MSRValue": "0x3fbfc00200",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00200",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -573,12 +573,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
- "MSRValue": "0x3fbfc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x3FBFC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -586,12 +586,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -599,12 +599,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063f800091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063F800091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -612,12 +612,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -625,12 +625,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083fc00091",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083FC00091",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -638,12 +638,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
- "MSRValue": "0x3fbfc00122",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x3FBFC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -651,12 +651,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -664,12 +664,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
- "MSRValue": "0x3fbfc00244",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -677,12 +677,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"MSRValue": "0x0600400244",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -690,12 +690,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
- "MSRValue": "0x3fbfc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -703,12 +703,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06004007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06004007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -716,12 +716,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063f8007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063F8007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -729,12 +729,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -742,12 +742,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083fc007f7",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083FC007F7",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -755,12 +755,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all requests that miss in the L3",
- "MSRValue": "0x3fbfc08fff",
+ "BriefDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC08FFF",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all requests miss in the L3",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index 8a18bfe9e3e4..26f2888341ee 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -11,7 +10,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state.",
"Counter": "Fixed counter 1",
@@ -21,7 +19,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -31,7 +28,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -1098,6 +1094,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1142,6 +1139,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1201,6 +1199,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1241,6 +1240,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "Counts the number of near return instructions retired.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1261,6 +1261,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near taken branches retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -1312,6 +1313,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 999a01bc6467..5f6cb2abc384 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -1012,7 +1012,7 @@
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address ",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1036,7 +1036,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads ",
+ "BriefDescription": "Counts all demand data reads",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1048,7 +1048,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's ",
+ "BriefDescription": "Counts all demand rfo's",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1084,7 +1084,7 @@
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs ",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1096,7 +1096,7 @@
"EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) ",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
"CounterHTOff": "0,1,2,3"
}
 ]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 7c2679514efb..bc4d5fc284a0 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -1,164 +1,340 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
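
[Editor's note, not part of the patch: the Topdown Level-1 metrics introduced in the ivb-metrics.json hunk above are plain ratios over issue slots, so they can be reproduced offline from raw counter values. Below is a minimal illustrative sketch; the variable names are assumptions standing in for counts collected separately (e.g. with perf stat), and the non-SMT MetricExpr formulas from the JSON are used verbatim.]

    # Illustrative only: recompute the non-SMT Topdown L1 metrics from raw counts.
    # Variable names are placeholders for counter values gathered elsewhere.
    def topdown_l1(cycles, idq_uops_not_delivered_core, uops_issued_any,
                   uops_retired_retire_slots, int_misc_recovery_cycles):
        slots = 4 * cycles  # "SLOTS": total issue-pipeline slots per thread
        frontend_bound = idq_uops_not_delivered_core / slots
        bad_speculation = (uops_issued_any - uops_retired_retire_slots
                           + 4 * int_misc_recovery_cycles) / slots
        retiring = uops_retired_retire_slots / slots
        backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
        return {"Frontend_Bound": frontend_bound,
                "Bad_Speculation": bad_speculation,
                "Retiring": retiring,
                "Backend_Bound": backend_bound}
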
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 0afbfd95ea30..2a0aad91d83d 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -29,7 +26,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index 7c2679514efb..f3874b5f9995 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -1,164 +1,346 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
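
For reference, the reworked ivt-metrics expressions above are plain arithmetic over event counts. The following minimal Python sketch uses invented sample counts for a 1-second window; the variable names merely mirror the perf event names and are not a perf API. It illustrates how the GFLOPs expression weights each FP event by its operation width and how the new DRAM_BW_Use metric turns IMC CAS counts into GB/s:

    # Hypothetical sample counts over a 1.0 s measurement window (duration_time).
    duration_time = 1.0                      # seconds
    fp_scalar_single = 2.0e9                 # FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE
    fp_scalar_double = 1.0e9                 # FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE
    fp_packed_double = 0.5e9                 # FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE
    fp_packed_single = 0.5e9                 # FP_COMP_OPS_EXE.SSE_PACKED_SINGLE
    simd256_packed_double = 0.25e9           # SIMD_FP_256.PACKED_DOUBLE
    simd256_packed_single = 0.25e9           # SIMD_FP_256.PACKED_SINGLE
    cas_count_read = 1.5e9                   # uncore_imc cas_count_read
    cas_count_write = 0.5e9                  # uncore_imc cas_count_write

    # GFLOPs: each event is weighted by the number of FP operations it represents
    # (1 for scalar, 2/4/8 for the packed widths), then scaled to giga-ops per second.
    flops = (1 * (fp_scalar_single + fp_scalar_double)
             + 2 * fp_packed_double
             + 4 * (fp_packed_single + simd256_packed_double)
             + 8 * simd256_packed_single)
    gflops = (flops / 1e9) / duration_time

    # DRAM_BW_Use: each CAS command moves one 64-byte cache line, so bandwidth is
    # 64 bytes * (reads + writes), scaled to GB and divided by wall-clock time.
    dram_bw_use = (64 * (cas_count_read + cas_count_write) / 1e9) / duration_time

    print(gflops, dram_bw_use)   # 9.0 GFLOPs and 128.0 GB/s for these sample numbers
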
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 0afbfd95ea30..2a0aad91d83d 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -9,7 +8,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -19,7 +17,6 @@
},
{
"PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -29,7 +26,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index ee22e4a5e30d..52dc6ef40e63 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -31,7 +31,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -42,7 +42,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
@@ -179,7 +179,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index fd7d7c438226..98c73e430b05 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,140 +1,232 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
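
The jkt-metrics rework above spells out the Topdown level-1 arithmetic explicitly per metric (with separate _SMT variants) instead of relying on #SMT_on conditionals. A small Python sketch with invented counter values (the names only echo the perf events; this is not how perf evaluates MetricExpr) shows how the four level-1 fractions are built from the issue-slot budget and why they sum to 1:

    # Hypothetical per-thread counts for one measurement interval (non-SMT case).
    cycles = 1.0e9                            # CPU_CLK_UNHALTED.THREAD
    idq_uops_not_delivered = 0.4e9            # IDQ_UOPS_NOT_DELIVERED.CORE
    uops_issued = 2.6e9                       # UOPS_ISSUED.ANY
    uops_retired_slots = 2.4e9                # UOPS_RETIRED.RETIRE_SLOTS
    recovery_cycles = 0.05e9                  # INT_MISC.RECOVERY_CYCLES

    slots = 4 * cycles                        # SLOTS: 4 issue slots per cycle

    frontend_bound = idq_uops_not_delivered / slots
    bad_speculation = (uops_issued - uops_retired_slots
                       + 4 * recovery_cycles) / slots
    retiring = uops_retired_slots / slots
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

    # The four categories partition the slot budget, so they always sum to 1.
    assert abs(frontend_bound + bad_speculation + retiring + backend_bound - 1) < 1e-9

    # SMT variant: the *_SMT metrics estimate per-core clocks as
    #   ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + ONE_THREAD_ACTIVE / REF_XCLK )
    # and substitute that estimate for 'cycles' in the slot budget above.

With the sample numbers this yields 10% Frontend_Bound, 10% Bad_Speculation, 60% Retiring and 20% Backend_Bound, matching the structure of the MetricExpr strings in the hunk above.
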
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 34a519d9bfa0..783a5b4a67b1 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -1,7 +1,6 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -10,8 +9,7 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,8 +18,7 @@
"CounterHTOff": "Fixed counter 2"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -778,7 +775,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -1098,7 +1095,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"AnyThread": "1",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index e434ec723001..e847b0fd696d 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -32,16 +32,16 @@
"BriefDescription": "Counts the number of L2 cache misses"
},
{
- "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses. ",
+ "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses. "
+ "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses."
},
{
- "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted. ",
+ "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
"EventCode": "0x04",
"Counter": "0,1",
"UMask": "0x1",
@@ -115,29 +115,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000070 ",
+ "MSRValue": "0x4000000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400070 ",
+ "MSRValue": "0x1000400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400070 ",
+ "MSRValue": "0x0800400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
@@ -148,29 +148,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080070 ",
+ "MSRValue": "0x1000080070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080070 ",
+ "MSRValue": "0x0800080070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010070 ",
+ "MSRValue": "0x0000010070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
@@ -181,29 +181,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x40000032f7 ",
+ "MSRValue": "0x40000032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x10004032f7 ",
+ "MSRValue": "0x10004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x08004032f7 ",
+ "MSRValue": "0x08004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
@@ -214,29 +214,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x10000832f7 ",
+ "MSRValue": "0x10000832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x08000832f7 ",
+ "MSRValue": "0x08000832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00000132f7 ",
+ "MSRValue": "0x00000132f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
@@ -247,29 +247,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000044 ",
+ "MSRValue": "0x4000000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400044 ",
+ "MSRValue": "0x1000400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400044 ",
+ "MSRValue": "0x0800400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -280,29 +280,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080044 ",
+ "MSRValue": "0x1000080044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080044 ",
+ "MSRValue": "0x0800080044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010044 ",
+ "MSRValue": "0x0000010044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
@@ -313,29 +313,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000022 ",
+ "MSRValue": "0x4000000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400022 ",
+ "MSRValue": "0x1000400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400022 ",
+ "MSRValue": "0x0800400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
@@ -346,29 +346,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080022 ",
+ "MSRValue": "0x1000080022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080022 ",
+ "MSRValue": "0x0800080022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010022 ",
+ "MSRValue": "0x0000010022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
@@ -379,29 +379,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000003091 ",
+ "MSRValue": "0x4000003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000403091 ",
+ "MSRValue": "0x1000403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800403091 ",
+ "MSRValue": "0x0800403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -412,29 +412,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000083091 ",
+ "MSRValue": "0x1000083091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800083091 ",
+ "MSRValue": "0x0800083091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000013091 ",
+ "MSRValue": "0x0000013091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
@@ -445,29 +445,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000008000 ",
+ "MSRValue": "0x4000008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000408000 ",
+ "MSRValue": "0x1000408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800408000 ",
+ "MSRValue": "0x0800408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
@@ -478,29 +478,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000088000 ",
+ "MSRValue": "0x1000088000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800088000 ",
+ "MSRValue": "0x0800088000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000018000 ",
+ "MSRValue": "0x0000018000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
@@ -511,7 +511,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000014800 ",
+ "MSRValue": "0x0000014800",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
@@ -522,7 +522,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000014000 ",
+ "MSRValue": "0x0000014000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
@@ -533,29 +533,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000002000 ",
+ "MSRValue": "0x4000002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000402000 ",
+ "MSRValue": "0x1000402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800402000 ",
+ "MSRValue": "0x0800402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -566,29 +566,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000082000 ",
+ "MSRValue": "0x1000082000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800082000 ",
+ "MSRValue": "0x0800082000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000012000 ",
+ "MSRValue": "0x0000012000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
@@ -599,29 +599,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000001000 ",
+ "MSRValue": "0x4000001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000401000 ",
+ "MSRValue": "0x1000401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800401000 ",
+ "MSRValue": "0x0800401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
@@ -632,29 +632,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000081000 ",
+ "MSRValue": "0x1000081000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800081000 ",
+ "MSRValue": "0x0800081000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000011000 ",
+ "MSRValue": "0x0000011000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
@@ -665,7 +665,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010800 ",
+ "MSRValue": "0x0000010800",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
@@ -676,29 +676,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000400 ",
+ "MSRValue": "0x4000000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400400 ",
+ "MSRValue": "0x1000400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400400 ",
+ "MSRValue": "0x0800400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
@@ -709,29 +709,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080400 ",
+ "MSRValue": "0x1000080400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080400 ",
+ "MSRValue": "0x0800080400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010400 ",
+ "MSRValue": "0x0000010400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
@@ -742,29 +742,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000200 ",
+ "MSRValue": "0x4000000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400200 ",
+ "MSRValue": "0x1000400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400200 ",
+ "MSRValue": "0x0800400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
@@ -775,29 +775,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080200 ",
+ "MSRValue": "0x1000080200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080200 ",
+ "MSRValue": "0x0800080200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010200 ",
+ "MSRValue": "0x0000010200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
@@ -808,18 +808,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400100 ",
+ "MSRValue": "0x1000400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400100 ",
+ "MSRValue": "0x0800400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
@@ -830,29 +830,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080100 ",
+ "MSRValue": "0x1000080100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080100 ",
+ "MSRValue": "0x0800080100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010100 ",
+ "MSRValue": "0x0000010100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
@@ -863,29 +863,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000080 ",
+ "MSRValue": "0x4000000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400080 ",
+ "MSRValue": "0x1000400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400080 ",
+ "MSRValue": "0x0800400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
@@ -896,29 +896,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080080 ",
+ "MSRValue": "0x1000080080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080080 ",
+ "MSRValue": "0x0800080080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010080 ",
+ "MSRValue": "0x0000010080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
@@ -929,29 +929,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000040 ",
+ "MSRValue": "0x4000000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400040 ",
+ "MSRValue": "0x1000400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400040 ",
+ "MSRValue": "0x0800400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -962,29 +962,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080040 ",
+ "MSRValue": "0x1000080040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080040 ",
+ "MSRValue": "0x0800080040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010040 ",
+ "MSRValue": "0x0000010040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
@@ -995,18 +995,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400020 ",
+ "MSRValue": "0x1000400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400020 ",
+ "MSRValue": "0x0800400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
@@ -1017,29 +1017,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080020 ",
+ "MSRValue": "0x1000080020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080020 ",
+ "MSRValue": "0x0800080020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000020020 ",
+ "MSRValue": "0x0000020020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
@@ -1050,7 +1050,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010020 ",
+ "MSRValue": "0x0000010020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
@@ -1061,29 +1061,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000004 ",
+ "MSRValue": "0x4000000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400004 ",
+ "MSRValue": "0x1000400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400004 ",
+ "MSRValue": "0x0800400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
@@ -1094,29 +1094,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080004 ",
+ "MSRValue": "0x1000080004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080004 ",
+ "MSRValue": "0x0800080004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010004 ",
+ "MSRValue": "0x0000010004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
@@ -1127,29 +1127,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000002 ",
+ "MSRValue": "0x4000000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400002 ",
+ "MSRValue": "0x1000400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400002 ",
+ "MSRValue": "0x0800400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
@@ -1160,29 +1160,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080002 ",
+ "MSRValue": "0x1000080002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080002 ",
+ "MSRValue": "0x0800080002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010002 ",
+ "MSRValue": "0x0000010002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
@@ -1193,29 +1193,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x4000000001 ",
+ "MSRValue": "0x4000000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000400001 ",
+ "MSRValue": "0x1000400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800400001 ",
+ "MSRValue": "0x0800400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
@@ -1226,29 +1226,29 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1000080001 ",
+ "MSRValue": "0x1000080001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0800080001 ",
+ "MSRValue": "0x0800080001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
@@ -1259,722 +1259,722 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000001 ",
+ "MSRValue": "0x0002000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000002 ",
+ "MSRValue": "0x0002000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000004 ",
+ "MSRValue": "0x0002000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000020 ",
+ "MSRValue": "0x0002000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000080 ",
+ "MSRValue": "0x0002000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000100 ",
+ "MSRValue": "0x0002000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000200 ",
+ "MSRValue": "0x0002000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000400 ",
+ "MSRValue": "0x0002000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002001000 ",
+ "MSRValue": "0x0002001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002002000 ",
+ "MSRValue": "0x0002002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002008000 ",
+ "MSRValue": "0x0002008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002003091 ",
+ "MSRValue": "0x0002003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000022 ",
+ "MSRValue": "0x0002000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000044 ",
+ "MSRValue": "0x0002000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00020032f7 ",
+ "MSRValue": "0x00020032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0002000070 ",
+ "MSRValue": "0x0002000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000001 ",
+ "MSRValue": "0x0004000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000002 ",
+ "MSRValue": "0x0004000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000004 ",
+ "MSRValue": "0x0004000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000020 ",
+ "MSRValue": "0x0004000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000040 ",
+ "MSRValue": "0x0004000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000080 ",
+ "MSRValue": "0x0004000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000100 ",
+ "MSRValue": "0x0004000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000200 ",
+ "MSRValue": "0x0004000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000400 ",
+ "MSRValue": "0x0004000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004001000 ",
+ "MSRValue": "0x0004001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004002000 ",
+ "MSRValue": "0x0004002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004008000 ",
+ "MSRValue": "0x0004008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004003091 ",
+ "MSRValue": "0x0004003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000022 ",
+ "MSRValue": "0x0004000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000044 ",
+ "MSRValue": "0x0004000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00040032f7 ",
+ "MSRValue": "0x00040032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0004000070 ",
+ "MSRValue": "0x0004000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000001 ",
+ "MSRValue": "0x0008000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000002 ",
+ "MSRValue": "0x0008000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000004 ",
+ "MSRValue": "0x0008000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000020 ",
+ "MSRValue": "0x0008000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000080 ",
+ "MSRValue": "0x0008000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000100 ",
+ "MSRValue": "0x0008000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000200 ",
+ "MSRValue": "0x0008000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000400 ",
+ "MSRValue": "0x0008000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008001000 ",
+ "MSRValue": "0x0008001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008002000 ",
+ "MSRValue": "0x0008002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008008000 ",
+ "MSRValue": "0x0008008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008003091 ",
+ "MSRValue": "0x0008003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000022 ",
+ "MSRValue": "0x0008000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0008000044 ",
+ "MSRValue": "0x0008000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00080032f7 ",
+ "MSRValue": "0x00080032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000001 ",
+ "MSRValue": "0x0010000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000002 ",
+ "MSRValue": "0x0010000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000004 ",
+ "MSRValue": "0x0010000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000020 ",
+ "MSRValue": "0x0010000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000040 ",
+ "MSRValue": "0x0010000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000080 ",
+ "MSRValue": "0x0010000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000100 ",
+ "MSRValue": "0x0010000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000200 ",
+ "MSRValue": "0x0010000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000400 ",
+ "MSRValue": "0x0010000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010001000 ",
+ "MSRValue": "0x0010001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010002000 ",
+ "MSRValue": "0x0010002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010008000 ",
+ "MSRValue": "0x0010008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010003091 ",
+ "MSRValue": "0x0010003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000022 ",
+ "MSRValue": "0x0010000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000044 ",
+ "MSRValue": "0x0010000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00100032f7 ",
+ "MSRValue": "0x00100032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0010000070 ",
+ "MSRValue": "0x0010000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180002 ",
+ "MSRValue": "0x1800180002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
@@ -1985,7 +1985,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180004 ",
+ "MSRValue": "0x1800180004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
@@ -1996,7 +1996,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180020 ",
+ "MSRValue": "0x1800180020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
@@ -2007,7 +2007,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180040 ",
+ "MSRValue": "0x1800180040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
@@ -2018,7 +2018,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180080 ",
+ "MSRValue": "0x1800180080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
@@ -2029,7 +2029,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180100 ",
+ "MSRValue": "0x1800180100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
@@ -2040,7 +2040,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180200 ",
+ "MSRValue": "0x1800180200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
@@ -2051,7 +2051,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180400 ",
+ "MSRValue": "0x1800180400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
@@ -2062,7 +2062,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800181000 ",
+ "MSRValue": "0x1800181000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
@@ -2073,7 +2073,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800182000 ",
+ "MSRValue": "0x1800182000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
@@ -2084,7 +2084,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800188000 ",
+ "MSRValue": "0x1800188000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
@@ -2095,7 +2095,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800183091 ",
+ "MSRValue": "0x1800183091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
@@ -2106,7 +2106,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180022 ",
+ "MSRValue": "0x1800180022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
@@ -2117,7 +2117,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180044 ",
+ "MSRValue": "0x1800180044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
@@ -2128,7 +2128,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x18001832f7 ",
+ "MSRValue": "0x18001832f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
@@ -2139,7 +2139,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800180070 ",
+ "MSRValue": "0x1800180070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
@@ -2150,7 +2150,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400002 ",
+ "MSRValue": "0x1800400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
@@ -2161,7 +2161,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400004 ",
+ "MSRValue": "0x1800400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
@@ -2172,7 +2172,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400040 ",
+ "MSRValue": "0x1800400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
@@ -2183,7 +2183,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400080 ",
+ "MSRValue": "0x1800400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
@@ -2194,7 +2194,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400100 ",
+ "MSRValue": "0x1800400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
@@ -2205,7 +2205,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400400 ",
+ "MSRValue": "0x1800400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
@@ -2216,7 +2216,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800401000 ",
+ "MSRValue": "0x1800401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
@@ -2227,7 +2227,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800402000 ",
+ "MSRValue": "0x1800402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
@@ -2238,7 +2238,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800408000 ",
+ "MSRValue": "0x1800408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
@@ -2249,7 +2249,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800403091 ",
+ "MSRValue": "0x1800403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
@@ -2260,7 +2260,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400022 ",
+ "MSRValue": "0x1800400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
@@ -2271,7 +2271,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400044 ",
+ "MSRValue": "0x1800400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
@@ -2282,7 +2282,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x18004032f7 ",
+ "MSRValue": "0x18004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
@@ -2293,7 +2293,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x1800400070 ",
+ "MSRValue": "0x1800400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
index 700652566200..c6bb16ba0f86 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -9,18 +9,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400070 ",
+ "MSRValue": "0x0100400070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200070 ",
+ "MSRValue": "0x0080200070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
@@ -31,18 +31,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000070 ",
+ "MSRValue": "0x0101000070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800070 ",
+ "MSRValue": "0x0080800070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
@@ -53,18 +53,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01004032f7 ",
+ "MSRValue": "0x01004032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00802032f7 ",
+ "MSRValue": "0x00802032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
@@ -75,18 +75,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01010032f7 ",
+ "MSRValue": "0x01010032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x00808032f7 ",
+ "MSRValue": "0x00808032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
@@ -97,18 +97,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400044 ",
+ "MSRValue": "0x0100400044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200044 ",
+ "MSRValue": "0x0080200044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
@@ -119,18 +119,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000044 ",
+ "MSRValue": "0x0101000044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800044 ",
+ "MSRValue": "0x0080800044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
@@ -141,18 +141,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400022 ",
+ "MSRValue": "0x0100400022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200022 ",
+ "MSRValue": "0x0080200022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
@@ -163,18 +163,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000022 ",
+ "MSRValue": "0x0101000022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800022 ",
+ "MSRValue": "0x0080800022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
@@ -185,18 +185,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100403091 ",
+ "MSRValue": "0x0100403091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080203091 ",
+ "MSRValue": "0x0080203091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
@@ -207,18 +207,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101003091 ",
+ "MSRValue": "0x0101003091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080803091 ",
+ "MSRValue": "0x0080803091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
@@ -229,18 +229,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100408000 ",
+ "MSRValue": "0x0100408000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080208000 ",
+ "MSRValue": "0x0080208000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
@@ -251,18 +251,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101008000 ",
+ "MSRValue": "0x0101008000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080808000 ",
+ "MSRValue": "0x0080808000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
@@ -273,18 +273,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100402000 ",
+ "MSRValue": "0x0100402000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080202000 ",
+ "MSRValue": "0x0080202000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
@@ -295,18 +295,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101002000 ",
+ "MSRValue": "0x0101002000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080802000 ",
+ "MSRValue": "0x0080802000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
@@ -317,18 +317,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100401000 ",
+ "MSRValue": "0x0100401000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080201000 ",
+ "MSRValue": "0x0080201000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
@@ -339,18 +339,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101001000 ",
+ "MSRValue": "0x0101001000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080801000 ",
+ "MSRValue": "0x0080801000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
@@ -361,18 +361,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400400 ",
+ "MSRValue": "0x0100400400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200400 ",
+ "MSRValue": "0x0080200400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
@@ -383,18 +383,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000400 ",
+ "MSRValue": "0x0101000400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800400 ",
+ "MSRValue": "0x0080800400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
@@ -405,18 +405,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400200 ",
+ "MSRValue": "0x0100400200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200200 ",
+ "MSRValue": "0x0080200200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
@@ -427,18 +427,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000200 ",
+ "MSRValue": "0x0101000200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800200 ",
+ "MSRValue": "0x0080800200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
@@ -449,18 +449,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400100 ",
+ "MSRValue": "0x0100400100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200100 ",
+ "MSRValue": "0x0080200100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
@@ -471,18 +471,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000100 ",
+ "MSRValue": "0x0101000100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
"MSRIndex": "0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800100 ",
+ "MSRValue": "0x0080800100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
@@ -493,7 +493,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x2000020080 ",
+ "MSRValue": "0x2000020080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
@@ -504,18 +504,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400080 ",
+ "MSRValue": "0x0100400080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200080 ",
+ "MSRValue": "0x0080200080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
@@ -526,18 +526,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000080 ",
+ "MSRValue": "0x0101000080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800080 ",
+ "MSRValue": "0x0080800080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
@@ -548,18 +548,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400040 ",
+ "MSRValue": "0x0100400040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200040 ",
+ "MSRValue": "0x0080200040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
@@ -570,18 +570,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000040 ",
+ "MSRValue": "0x0101000040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800040 ",
+ "MSRValue": "0x0080800040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
@@ -592,7 +592,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x2000020020 ",
+ "MSRValue": "0x2000020020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
@@ -603,18 +603,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400020 ",
+ "MSRValue": "0x0100400020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200020 ",
+ "MSRValue": "0x0080200020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
@@ -625,18 +625,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000020 ",
+ "MSRValue": "0x0101000020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800020 ",
+ "MSRValue": "0x0080800020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
@@ -647,18 +647,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400004 ",
+ "MSRValue": "0x0100400004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200004 ",
+ "MSRValue": "0x0080200004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
@@ -669,18 +669,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000004 ",
+ "MSRValue": "0x0101000004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800004 ",
+ "MSRValue": "0x0080800004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
@@ -691,18 +691,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400002 ",
+ "MSRValue": "0x0100400002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200002 ",
+ "MSRValue": "0x0080200002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
@@ -713,18 +713,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000002 ",
+ "MSRValue": "0x0101000002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800002 ",
+ "MSRValue": "0x0080800002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
@@ -735,18 +735,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0100400001 ",
+ "MSRValue": "0x0100400001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080200001 ",
+ "MSRValue": "0x0080200001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
@@ -757,18 +757,18 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0101000001 ",
+ "MSRValue": "0x0101000001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
"Offcore": "1"
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0080800001 ",
+ "MSRValue": "0x0080800001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
@@ -779,7 +779,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600001 ",
+ "MSRValue": "0x0180600001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
@@ -790,7 +790,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600002 ",
+ "MSRValue": "0x0180600002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
@@ -801,7 +801,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600004 ",
+ "MSRValue": "0x0180600004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
@@ -812,7 +812,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600020 ",
+ "MSRValue": "0x0180600020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
@@ -823,7 +823,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600080 ",
+ "MSRValue": "0x0180600080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
@@ -834,7 +834,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600100 ",
+ "MSRValue": "0x0180600100",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
@@ -845,7 +845,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600200 ",
+ "MSRValue": "0x0180600200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
@@ -856,7 +856,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600400 ",
+ "MSRValue": "0x0180600400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
@@ -867,7 +867,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180601000 ",
+ "MSRValue": "0x0180601000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
@@ -878,7 +878,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180608000 ",
+ "MSRValue": "0x0180608000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
@@ -889,7 +889,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180603091 ",
+ "MSRValue": "0x0180603091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
@@ -900,7 +900,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600022 ",
+ "MSRValue": "0x0180600022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
@@ -911,7 +911,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600044 ",
+ "MSRValue": "0x0180600044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
@@ -922,7 +922,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01806032f7 ",
+ "MSRValue": "0x01806032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
@@ -933,7 +933,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0180600070 ",
+ "MSRValue": "0x0180600070",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
@@ -944,7 +944,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800001 ",
+ "MSRValue": "0x0181800001",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
@@ -955,7 +955,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800002 ",
+ "MSRValue": "0x0181800002",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
@@ -966,7 +966,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800004 ",
+ "MSRValue": "0x0181800004",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
@@ -977,7 +977,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800020 ",
+ "MSRValue": "0x0181800020",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
@@ -988,7 +988,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800040 ",
+ "MSRValue": "0x0181800040",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
@@ -999,7 +999,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800080 ",
+ "MSRValue": "0x0181800080",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
@@ -1010,7 +1010,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800200 ",
+ "MSRValue": "0x0181800200",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
@@ -1021,7 +1021,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800400 ",
+ "MSRValue": "0x0181800400",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
@@ -1032,7 +1032,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181801000 ",
+ "MSRValue": "0x0181801000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
@@ -1043,7 +1043,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181802000 ",
+ "MSRValue": "0x0181802000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
@@ -1054,7 +1054,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181808000 ",
+ "MSRValue": "0x0181808000",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
@@ -1065,7 +1065,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181803091 ",
+ "MSRValue": "0x0181803091",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
@@ -1076,7 +1076,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800022 ",
+ "MSRValue": "0x0181800022",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
@@ -1087,7 +1087,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x0181800044 ",
+ "MSRValue": "0x0181800044",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
@@ -1098,7 +1098,7 @@
},
{
"EventCode": "0xB7",
- "MSRValue": "0x01818032f7 ",
+ "MSRValue": "0x01818032f7",
"Counter": "0,1",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index bb5494cfb5ae..92e4ef2e22c6 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -144,7 +144,7 @@
"BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)."
},
{
- "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. ",
+ "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
"EventCode": "0xC2",
"Counter": "0,1",
"UMask": "0x10",
@@ -218,7 +218,7 @@
"UMask": "0x20",
"EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted. "
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted."
},
{
"PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
@@ -251,7 +251,7 @@
"UMask": "0x1f",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full. "
+ "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full."
},
{
"EventCode": "0xC0",
@@ -268,11 +268,10 @@
"UMask": "0x1",
"EventName": "CYCLES_DIV_BUSY.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider. "
+ "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider."
},
{
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -296,8 +295,7 @@
"BriefDescription": "Counts the number of unhalted reference clock cycles"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter\r\n",
- "EventCode": "0x00",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -305,7 +303,6 @@
"BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -343,7 +340,7 @@
"UMask": "0x1",
"EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store ",
+ "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store",
"Data_LA": "1"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index f31594507f8c..9e493977771f 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -36,7 +36,7 @@
"EdgeDetect": "1"
},
{
- "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. ",
+ "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
"EventCode": "0x05",
"Counter": "0,1",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index e05c2c8458fc..d6984a3017e0 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
GenuineIntel-6-2F,v2,westmereex,core
GenuineIntel-6-55-[01234],v1,skylakex,core
GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
+AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index 16b04a20bc12..bb79e89c2049 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -1,207 +1,200 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load uops retired",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of store uops retired.",
- "EventCode": "0xD0",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
+ "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xD2",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops.",
- "EventCode": "0xD4",
+ "EventCode": "0x27",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements.",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "L1D.ALLOCATED_IN_M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "L1D.EVICTION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x51",
+ "EventCode": "0x28",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "L1D.ALL_M_REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -224,12 +217,61 @@
"CounterHTOff": "2"
},
{
- "EventCode": "0x63",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -254,6 +296,16 @@
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
@@ -263,6 +315,16 @@
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
@@ -280,6 +342,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -325,148 +396,182 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
+ "EventCode": "0xBF",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "UMask": "0x5",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PF_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x24",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired (Precise Event)",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PF_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x27",
+ "PEBS": "1",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "L2_L1D_WB_RQSTS.HIT_S",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x28",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. (Precise Event - PEBS)",
+ "EventCode": "0xD4",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xF0",
@@ -623,24 +728,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xF4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -650,93 +737,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBF",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0244",
"Counter": "0,1,2,3",
@@ -1825,7 +1825,7 @@
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1837,7 +1837,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1849,7 +1849,7 @@
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1861,7 +1861,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
},
{
@@ -1873,7 +1873,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
"CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
index 982eda48785e..ce26537c7d47 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -1,68 +1,5 @@
[
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OTHER_ASSISTS.AVX_STORE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x10",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -126,6 +63,69 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index 1b7b1dd36c68..e58ed14a204c 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -1,24 +1,5 @@
[
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -39,159 +20,201 @@
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAB",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
- "EventCode": "0xAB",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAC",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB_FILL.OTHER_CANCEL",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xAC",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "EventCode": "0x80",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -223,83 +246,60 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
+ "EventCode": "0x9C",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x79",
+ "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops.",
- "CounterMask": "4",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
+ "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop.",
- "CounterMask": "1",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "DSB_FILL.ALL_CANCEL",
+ "UMask": "0x2",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x79",
+ "EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x79",
+ "EventCode": "0xAC",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "UMask": "0xa",
+ "EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
index e6dfa89d00f3..78c1a987f9a2 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -1,5 +1,32 @@
[
{
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -126,33 +153,6 @@
"CounterHTOff": "3"
},
{
- "EventCode": "0xBE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.LLC_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x300400244",
"Counter": "0,1,2,3",
@@ -367,7 +367,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -379,7 +379,7 @@
"EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
+ "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
"CounterHTOff": "0,1,2,3"
},
{
@@ -391,7 +391,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -403,7 +403,7 @@
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -415,7 +415,7 @@
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -427,7 +427,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,7 +439,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index 64b195b82c50..874eb40a2e0f 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -9,6 +9,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x4E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_PRE_REQ.DL1_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x5C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -38,15 +47,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4E",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HW_PRE_REQ.DL1_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index 34a519d9bfa0..b7150f65f16d 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -1,289 +1,307 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
- "EventCode": "0x00",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
- },
- {
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
+ "UMask": "0x40",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x89",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x87",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x87",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full.",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0x59",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.",
"EventCode": "0x59",
"Counter": "0,1,2,3",
"UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
"EventCode": "0x59",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -302,48 +320,21 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RESOURCE_STALLS.LB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA2",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "UMask": "0xc",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Cycles with either free list is empty.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "UMask": "0xf",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -356,702 +347,663 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
- "EventCode": "0x0E",
+ "EventCode": "0x5B",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0x4f",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "BriefDescription": "Resource stalls out of order resources full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x5E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
+ "EventCode": "0x5E",
"Invert": "1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xCC",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR.",
+ "BriefDescription": "Stall cycles because IQ is full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of micro-ops retired.",
- "EventCode": "0xC2",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
- "EventCode": "0xC2",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
+ "EventCode": "0x88",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "UMask": "0x90",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted taken branch instructions retired.",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "EventCode": "0xC5",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC1",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "UMask": "0xd0",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
+ "EventCode": "0x89",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed.",
- "EventCode": "0x14",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "ARITH.FPU_DIV",
- "SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed.",
- "CounterMask": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_DISPATCHED.THREAD",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched per thread.",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_DISPATCHED.CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched from any thread.",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "2",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "6",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4C",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x4C",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "EventName": "RESOURCE_STALLS.LB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x03",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "UMask": "0xa",
+ "EventName": "RESOURCE_STALLS.LB_SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
- "EventCode": "0x07",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "UMask": "0xe",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x07",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB6",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "AGU_BYPASS_CANCEL.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "UMask": "0xf0",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
+ "EventCode": "0xA3",
+ "Counter": "2",
"UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "CounterHTOff": "2"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "CounterHTOff": "2"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "BriefDescription": "Uops dispatched per thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA1",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "BriefDescription": "Uops dispatched from any thread.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
- "TakenAlone": "1",
- "CounterHTOff": "1"
- },
- {
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with either free list is empty.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "RESOURCE_STALLS.MEM_RS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5B",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls out of order resources full.",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xB6",
"Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "RESOURCE_STALLS.LB_SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "UMask": "0x1",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "TakenAlone": "1",
+ "CounterHTOff": "1"
},
{
- "EventCode": "0x0D",
+ "EventCode": "0xC1",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xE6",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)",
+ "EventCode": "0xC2",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x88",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0x89",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC2",
@@ -1065,13 +1017,14 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xA8",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)",
+ "EventCode": "0xC2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1086,135 +1039,188 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "2",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "EventCode": "0x3C",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
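Note on the UOPS_RETIRED.STALL_CYCLES and UOPS_RETIRED.TOTAL_CYCLES entries above: they show how CounterMask and Invert combine. With a non-zero CounterMask the counter increments once per cycle in which the per-cycle event count is greater than or equal to the mask, and Invert reverses that comparison. A minimal sketch of that predicate (a hypothetical helper for illustration, not part of the perf tooling):

  def cycle_counted(uops_retired_this_cycle, counter_mask, invert):
      # CounterMask != 0: count cycles where the per-cycle event count
      # compares >= CounterMask; Invert flips the comparison.
      meets_mask = uops_retired_this_cycle >= counter_mask
      return not meets_mask if invert else meets_mask

  # STALL_CYCLES (CounterMask=1, Invert=1): cycles with zero retired uops.
  # TOTAL_CYCLES (CounterMask=10, Invert=1): cycles with fewer than 10 retired
  # uops, i.e. effectively all cycles, since at most 4 uops retire per cycle.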
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index fd7d7c438226..cfeba5067bab 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -1,140 +1,226 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
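The four TopdownL1 MetricExprs added above partition issue slots into Frontend_Bound, Bad_Speculation and Retiring, with Backend_Bound defined as the remainder. A minimal sketch of evaluating the non-SMT expressions from this file (SLOTS = 4 * cycles), assuming made-up raw event counts purely for illustration:

  def topdown_l1(counts):
      # Non-SMT forms from snb-metrics.json: SLOTS = 4 * cycles.
      slots = 4.0 * counts["cycles"]
      frontend = counts["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
      bad_spec = (counts["UOPS_ISSUED.ANY"]
                  - counts["UOPS_RETIRED.RETIRE_SLOTS"]
                  + 4 * counts["INT_MISC.RECOVERY_CYCLES"]) / slots
      retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots
      backend = 1.0 - (frontend + bad_spec + retiring)
      return {"Frontend_Bound": frontend, "Bad_Speculation": bad_spec,
              "Retiring": retiring, "Backend_Bound": backend}

  # Hypothetical counts; the four fractions sum to 1 by construction.
  print(topdown_l1({"cycles": 1_000_000,
                    "IDQ_UOPS_NOT_DELIVERED.CORE": 800_000,
                    "UOPS_ISSUED.ANY": 2_600_000,
                    "UOPS_RETIRED.RETIRE_SLOTS": 2_400_000,
                    "INT_MISC.RECOVERY_CYCLES": 50_000}))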
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
index a654ab771fce..b8eccce5d75d 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -1,131 +1,131 @@
[
{
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x85",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
- "EventCode": "0x85",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x85",
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
+ "EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
- "EventCode": "0x08",
+ "EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when PMH is busy with page walks.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 82be7d1b8b81..805ef1436539 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -36,7 +36,7 @@
"BriefDescription": "L2 cache request misses"
},
{
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
new file mode 100644
index 000000000000..47814046fa9d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json
@@ -0,0 +1,20 @@
+[
+ {
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
+ },
+ {
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x3f",
+ "EventName": "FETCH_STALL.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
index 7468af99190a..1ed62ad4cf77 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -210,7 +210,7 @@
"UMask": "0x4",
"EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted "
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted"
},
{
"EventCode": "0xCA",
@@ -275,7 +275,6 @@
},
{
"PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -284,7 +283,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.CORE",
@@ -293,7 +291,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "EventCode": "0x00",
"Counter": "Fixed counter 3",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 54bfe9e4045c..720458139049 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -60,10 +60,10 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0xc1",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
@@ -73,7 +73,7 @@
"PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "UMask": "0xc2",
"EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "RFO requests that hit L2 cache",
@@ -83,7 +83,7 @@
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
+ "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
@@ -482,7 +482,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -554,7 +554,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -661,13 +661,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"EventCode": "0xF2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "L2_LINES_OUT.USELESS_PREF",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -690,249 +690,2238 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400001 ",
+ "MSRValue": "0x0200028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000018000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC01C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400001 ",
+ "MSRValue": "0x1000400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400001 ",
+ "MSRValue": "0x0400400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400001 ",
+ "MSRValue": "0x0200400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400001 ",
+ "MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400001 ",
+ "MSRValue": "0x0080400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0001 ",
+ "MSRValue": "0x3FC01C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0001 ",
+ "MSRValue": "0x10001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0001 ",
+ "MSRValue": "0x04001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0001 ",
+ "MSRValue": "0x02001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0001 ",
+ "MSRValue": "0x01001C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0001 ",
+ "MSRValue": "0x00801C0001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401C0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020001 ",
+ "MSRValue": "0x0080100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC0020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001 ",
+ "MSRValue": "0x1000020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001 ",
+ "MSRValue": "0x0400020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001 ",
+ "MSRValue": "0x0200020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001 ",
+ "MSRValue": "0x0100020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001 ",
+ "MSRValue": "0x0080020001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001 ",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that have any response type.",
+ "BriefDescription": "Counts demand data reads have any response type.",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index 578dff5bd823..7fa95a35e3ca 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -177,7 +177,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -242,7 +242,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -253,7 +253,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"EventCode": "0xC6",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
@@ -360,7 +360,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x400806",
"Counter": "0,1,2,3",
@@ -374,7 +374,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
@@ -388,7 +388,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"EventCode": "0xC6",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
@@ -454,7 +454,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"EventCode": "0xC6",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index 3bd8b712c889..f197b4c7695b 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -215,7 +215,7 @@
"UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -237,6 +237,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -292,7 +293,7 @@
"UMask": "0x4",
"EventName": "RTM_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -346,7 +347,7 @@
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x4",
"Counter": "0,1,2,3",
@@ -354,13 +355,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x8",
"Counter": "0,1,2,3",
@@ -368,13 +369,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"SampleAfterValue": "50021",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x10",
"Counter": "0,1,2,3",
@@ -382,13 +383,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"SampleAfterValue": "20011",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x20",
"Counter": "0,1,2,3",
@@ -396,13 +397,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x40",
"Counter": "0,1,2,3",
@@ -410,13 +411,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"SampleAfterValue": "2003",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x80",
"Counter": "0,1,2,3",
@@ -424,13 +425,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"SampleAfterValue": "1009",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x100",
"Counter": "0,1,2,3",
@@ -438,13 +439,13 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"SampleAfterValue": "503",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "2",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
"MSRValue": "0x200",
"Counter": "0,1,2,3",
@@ -452,163 +453,1151 @@
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"SampleAfterValue": "101",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000001 ",
+ "MSRValue": "0x3FFC408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000108000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000088000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000028000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00BC400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007C400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFC400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203C400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000001 ",
+ "MSRValue": "0x103C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000001 ",
+ "MSRValue": "0x043C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000001 ",
+ "MSRValue": "0x023C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000001 ",
+ "MSRValue": "0x013C400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000001 ",
+ "MSRValue": "0x00BC400001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000001 ",
+ "MSRValue": "0x007C400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FC4000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001 ",
+ "MSRValue": "0x1004000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001 ",
+ "MSRValue": "0x0404000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001 ",
+ "MSRValue": "0x0204000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001 ",
+ "MSRValue": "0x0104000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001 ",
+ "MSRValue": "0x0084000001",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001C0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6, 0x1a7",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "BriefDescription": "Counts demand data reads",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
}
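
Each of the OFFCORE_RESPONSE entries above pairs event code 0xB7/0xBB with a per-event mask (MSRValue) that is programmed into MSR 0x1a6/0x1a7, as the MSRIndex fields show. A minimal sketch of how such a JSON event file can be inspected follows; the file path is an assumption (it points at where the Skylake client event files live in a typical tree), and the key names mirror the fields visible in the hunks above.

import json

# Assumed path to one of the Skylake pmu-events files edited in this patch;
# adjust to your tree layout.
PATH = "tools/perf/pmu-events/arch/x86/skylake/memory.json"

with open(PATH) as f:
    events = json.load(f)

for ev in events:
    # Off-core response events carry "Offcore": "1" plus the MSR programming
    # (MSRIndex 0x1a6/0x1a7 and a per-event MSRValue), as in the entries above.
    if ev.get("Offcore") == "1":
        print(f'{ev["EventName"]}: EventCode={ev["EventCode"]} '
              f'MSRIndex={ev["MSRIndex"]} MSRValue={ev["MSRValue"]}')
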
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index bc6d2afbcd8a..4a891fbbc4bb 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,7 +1,6 @@
[
{
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "EventCode": "0x00",
"Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
@@ -11,7 +10,6 @@
},
{
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"Counter": "Fixed counter 1",
"UMask": "0x2",
"AnyThread": "1",
@@ -31,7 +28,6 @@
},
{
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "EventCode": "0x00",
"Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
@@ -121,7 +117,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -248,6 +244,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"EventCode": "0x5E",
"Counter": "0,1,2,3",
@@ -361,8 +367,8 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts resource-related stall cycles.",
+ "EventCode": "0xa2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
@@ -735,7 +741,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -759,6 +765,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of machine clears (nukes) of any type.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -839,14 +846,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "SKL091",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -924,7 +932,7 @@
"UMask": "0x20",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -938,6 +946,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
"EventCode": "0xE6",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 71e9737f4614..2c95417a4dae 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1,164 +1,364 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
},
{
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
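
The TopdownL1 metrics added above break retirement slots into four buckets using the MetricExpr formulas shown (non-SMT forms: Frontend_Bound, Bad_Speculation, Retiring, with Backend_Bound as the remainder). A minimal sketch of that arithmetic, assuming the inputs are raw counter values collected separately (for example with perf stat), is given below; the parameter names are placeholders that mirror the event names in the expressions.

def topdown_l1(cycles, idq_uops_not_delivered_core, uops_issued_any,
               uops_retired_retire_slots, int_misc_recovery_cycles):
    # "SLOTS": four issue-pipeline slots per cycle, per the SLOTS metric above.
    slots = 4 * cycles
    frontend_bound = idq_uops_not_delivered_core / slots
    bad_speculation = (uops_issued_any - uops_retired_retire_slots
                       + 4 * int_misc_recovery_cycles) / slots
    retiring = uops_retired_retire_slots / slots
    # Backend_Bound is defined as whatever fraction of slots remains.
    backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
    return frontend_bound, bad_speculation, retiring, backend_bound
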
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 5c9940866acd..24df183693fa 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -61,17 +61,17 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
+ "UMask": "0xc1",
"BriefDescription": "Demand Data Read requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x24",
- "UMask": "0x42",
+ "UMask": "0xc2",
"BriefDescription": "RFO requests that hit L2 cache",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.RFO_HIT",
@@ -81,7 +81,7 @@
},
{
"EventCode": "0x24",
- "UMask": "0x44",
+ "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"Counter": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
@@ -165,6 +165,7 @@
"BriefDescription": "Core-originated cacheable demand requests missed L3",
"Counter": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.MISS",
+ "Errata": "SKL057",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -175,28 +176,29 @@
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"Counter": "0,1,2,3",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "Errata": "SKL057",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x48",
"UMask": "0x1",
- "BriefDescription": "L1D miss outstandings duration in cycles",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x48",
"UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
"Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -234,21 +236,21 @@
{
"EventCode": "0x60",
"UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -307,21 +309,21 @@
{
"EventCode": "0x60",
"UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -486,7 +488,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -558,7 +560,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -690,6 +692,7 @@
"BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -699,17 +702,18 @@
"BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.NON_SILENT",
- "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xF2",
"UMask": "0x4",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Deprecated": "1",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.USELESS_PREF",
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -736,12 +740,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that have any response type.",
- "MSRValue": "0x0000010001 ",
+ "BriefDescription": "Counts demand data reads have any response type.",
+ "MSRValue": "0x0000010001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -749,12 +753,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x01003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -762,25 +766,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x04003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0001 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -788,12 +779,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x10003C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -801,12 +792,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that hit in the L3.",
- "MSRValue": "0x3f803c0001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3F803C0001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -814,12 +805,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
- "MSRValue": "0x0000010002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "MSRValue": "0x0000010002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -827,12 +818,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x01003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -840,12 +831,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x04003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -853,25 +844,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0002 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x10003C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -879,12 +857,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
- "MSRValue": "0x3f803c0002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3F803C0002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -892,12 +870,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that have any response type.",
- "MSRValue": "0x0000010004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "MSRValue": "0x0000010004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -905,12 +883,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x01003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -918,12 +896,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x04003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -931,25 +909,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0004 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x10003C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -957,12 +922,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that hit in the L3.",
- "MSRValue": "0x3f803c0004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x3F803C0004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -970,12 +935,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
- "MSRValue": "0x0000010010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "MSRValue": "0x0000010010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -983,12 +948,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x01003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -996,12 +961,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x04003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1009,25 +974,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0010 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x10003C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1035,12 +987,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
- "MSRValue": "0x3f803c0010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3F803C0010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1048,12 +1000,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
- "MSRValue": "0x0000010020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "MSRValue": "0x0000010020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1061,12 +1013,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x01003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1074,12 +1026,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x04003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1087,25 +1039,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0020 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x10003C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1113,12 +1052,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1126,12 +1065,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
- "MSRValue": "0x0000010080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "MSRValue": "0x0000010080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1139,12 +1078,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x01003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1152,25 +1091,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x04003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0080 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1178,12 +1104,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x10003C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1191,12 +1117,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
- "MSRValue": "0x3f803c0080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3F803C0080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1204,12 +1130,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
- "MSRValue": "0x0000010100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "MSRValue": "0x0000010100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1217,12 +1143,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x01003C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1230,12 +1156,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x04003C0100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1243,12 +1169,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x10003C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1256,12 +1182,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1269,12 +1195,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0100 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
+ "MSRValue": "0x0000010400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1282,12 +1208,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
- "MSRValue": "0x0000010400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x01003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1295,12 +1221,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x04003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1308,12 +1234,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x10003C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1321,12 +1247,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "PF_L1D_AND_SW & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3F803C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1334,12 +1260,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0400 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1347,12 +1273,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.",
- "MSRValue": "0x3f803c0400 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1360,12 +1286,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that have any response type.",
- "MSRValue": "0x0000018000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1373,12 +1299,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1386,12 +1312,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1399,12 +1325,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c8000 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1412,12 +1338,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1425,12 +1351,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts any other requests that hit in the L3.",
- "MSRValue": "0x3f803c8000 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1438,12 +1364,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that have any response type.",
- "MSRValue": "0x0000010490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1451,12 +1377,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1464,12 +1390,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0490 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1477,12 +1403,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1490,12 +1416,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1503,12 +1429,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
- "MSRValue": "0x3f803c0490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1516,12 +1442,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that have any response type.",
- "MSRValue": "0x0000010120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1529,12 +1455,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0120 ",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD have any response type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1542,12 +1468,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1555,12 +1481,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1568,12 +1494,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1581,12 +1507,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1594,12 +1520,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
- "MSRValue": "0x0000010491 ",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x08007C0001",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts demand data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1607,12 +1532,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0491 ",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x08007C0002",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all demand data writes (RFOs)",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1620,12 +1544,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0491 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "MSRValue": "0x08007C0004",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1633,12 +1556,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0491 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x08007C0010",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1646,12 +1568,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0491 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x08007C0020",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1659,12 +1580,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
- "MSRValue": "0x3f803c0491 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x08007C0080",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1672,12 +1592,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
- "MSRValue": "0x0000010122 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x08007C0100",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1685,12 +1604,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "MSRValue": "0x01003c0122 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x08007C0400",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1698,12 +1616,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x04003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0490",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1711,12 +1628,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "MSRValue": "0x08003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0120",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1724,12 +1640,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "MSRValue": "0x10003c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0491",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1737,12 +1652,11 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
- "MSRValue": "0x3f803c0122 ",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0122",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "PublicDescription": "TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 286ed1a37ec9..c5d0babe89fc 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -59,7 +59,6 @@
"BriefDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
- "PublicDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -69,7 +68,6 @@
"BriefDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16)",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
- "PublicDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 403a4f89e9b2..4dc583cfb545 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -2,16 +2,6 @@
{
"EventCode": "0x79",
"UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x4",
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_CYCLES",
@@ -22,11 +12,11 @@
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -43,6 +33,16 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x10",
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
@@ -55,22 +55,22 @@
{
"EventCode": "0x79",
"UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,22 +87,22 @@
{
"EventCode": "0x79",
"UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -118,24 +118,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EdgeDetect": "1",
"EventCode": "0x79",
"UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
"EventCode": "0x79",
"UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -177,67 +177,67 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "CounterMask": "4",
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "CounterMask": "3",
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "CounterMask": "2",
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "CounterMask": "1",
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0x9C",
"UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -247,20 +247,19 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x11",
+ "MSRValue": "0x400406",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -268,11 +267,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x12",
+ "MSRValue": "0x200206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -281,11 +280,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x13",
+ "MSRValue": "0x400206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -294,13 +293,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x14",
+ "MSRValue": "0x15",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -308,13 +307,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x15",
+ "MSRValue": "0x14",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -322,11 +321,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400206",
+ "MSRValue": "0x13",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -335,11 +334,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x200206",
+ "MSRValue": "0x12",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -348,12 +347,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400406",
+ "MSRValue": "0x11",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -361,13 +361,12 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x400806",
+ "MSRValue": "0x300206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -375,13 +374,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x401006",
+ "MSRValue": "0x100206",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -389,13 +388,12 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x402006",
+ "MSRValue": "0x420006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -403,11 +401,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x404006",
+ "MSRValue": "0x410006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -429,11 +427,11 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x410006",
+ "MSRValue": "0x404006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"TakenAlone": "1",
"SampleAfterValue": "100007",
@@ -442,12 +440,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x420006",
+ "MSRValue": "0x402006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -455,13 +454,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
- "MSRValue": "0x100206",
+ "MSRValue": "0x401006",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -469,12 +468,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"PEBS": "1",
- "MSRValue": "0x300206",
+ "MSRValue": "0x400806",
"Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index e7f1aa31226d..48a9cdf81307 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -129,20 +129,20 @@
{
"EventCode": "0x60",
"UMask": "0x10",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"UMask": "0x10",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -210,7 +210,7 @@
{
"EventCode": "0xC8",
"UMask": "0x4",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
@@ -242,6 +242,7 @@
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -287,7 +288,7 @@
{
"EventCode": "0xC9",
"UMask": "0x4",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
@@ -347,125 +348,125 @@
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
"PEBS": "2",
- "MSRValue": "0x4",
+ "MSRValue": "0x200",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "100003",
+ "SampleAfterValue": "101",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"PEBS": "2",
- "MSRValue": "0x8",
+ "MSRValue": "0x100",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "50021",
+ "SampleAfterValue": "503",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"PEBS": "2",
- "MSRValue": "0x10",
+ "MSRValue": "0x80",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "20011",
+ "SampleAfterValue": "1009",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"PEBS": "2",
- "MSRValue": "0x20",
+ "MSRValue": "0x40",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "100007",
+ "SampleAfterValue": "2003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
"PEBS": "2",
- "MSRValue": "0x40",
+ "MSRValue": "0x20",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "2003",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"PEBS": "2",
- "MSRValue": "0x80",
+ "MSRValue": "0x10",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "1009",
+ "SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
"PEBS": "2",
- "MSRValue": "0x100",
+ "MSRValue": "0x8",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "503",
+ "SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xCD",
"UMask": "0x1",
- "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"PEBS": "2",
- "MSRValue": "0x200",
+ "MSRValue": "0x4",
"Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"TakenAlone": "1",
- "SampleAfterValue": "101",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
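Note: the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above all share EventCode 0xCD / UMask 0x1; what distinguishes them is the latency threshold written to MSR 0x3F6 ("MSRValue", in cycles) and the default sampling period ("SampleAfterValue", a prime so samples do not lock step with periodic code). As a minimal sketch, assuming an Intel core PMU that exposes the "ldlat" format attribute (an assumption on my part, not stated in this patch), one such entry could be mapped to a perf event specification roughly like this:

  # Illustrative only: map one load-latency JSON entry to a perf invocation.
  import json

  entry = json.loads('''
  {
    "EventCode": "0xCD",
    "UMask": "0x1",
    "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
    "MSRIndex": "0x3F6",
    "MSRValue": "0x80",
    "SampleAfterValue": "1009"
  }
  ''')

  threshold = int(entry["MSRValue"], 16)      # latency cutoff loaded into MSR 0x3F6 (cycles)
  period = int(entry["SampleAfterValue"])     # default sample period
  spec = "cpu/event={},umask={},ldlat={}/pp".format(
      entry["EventCode"].lower(), entry["UMask"].lower(), threshold)
  print("perf record -e {} -c {}".format(spec, period))
  # -> perf record -e cpu/event=0xcd,umask=0x1,ldlat=128/pp -c 1009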
{
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss in the L3.",
- "MSRValue": "0x3fbc000001 ",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3FBC000001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
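Note: the OFFCORE_RESPONSE.* entries in the hunks below all use the matrix events 0xB7/0xBB together with MSR_OFFCORE_RSP_0/1 (0x1A6/0x1A7); "MSRValue" is the request/response bit mask written to that MSR. As a rough sketch, assuming the core PMU exposes an "offcore_rsp" format attribute (again an assumption, not spelled out in this patch), such an entry could be counted with a raw perf spec along these lines:

  # Illustrative only: build a raw offcore-response spec from one entry.
  entry = {
      "EventCode": "0xB7, 0xBB",   # either event select can carry an offcore response
      "UMask": "0x1",
      "MSRIndex": "0x1a6, 0x1a7",  # MSR_OFFCORE_RSP_0 / MSR_OFFCORE_RSP_1
      "MSRValue": "0x3FBC000001",
  }

  event = entry["EventCode"].split(",")[0].strip().lower()   # 0xb7 pairs with MSR 0x1a6
  mask = int(entry["MSRValue"], 16)
  print("perf stat -e cpu/event={},umask={},offcore_rsp={:#x}/".format(
      event, entry["UMask"], mask))
  # low 16 bits of the mask select the request type (bit 0 = demand data read here)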
@@ -473,12 +474,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x083FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -486,12 +487,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x103FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -499,12 +500,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x063FC00001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -512,12 +513,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x063B800001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -525,12 +526,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000001 ",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0604000001",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -538,12 +539,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.",
- "MSRValue": "0x3fbc000002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3FBC000002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -551,12 +552,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x083FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -564,12 +565,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x103FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -577,12 +578,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x063FC00002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -590,12 +591,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x063B800002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -603,12 +604,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000002 ",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0604000002",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -616,12 +617,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss in the L3.",
- "MSRValue": "0x3fbc000004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "MSRValue": "0x3FBC000004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -629,12 +630,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x083FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -642,12 +643,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x103FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -655,12 +656,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x063FC00004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -668,12 +669,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x063B800004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -681,12 +682,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000004 ",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "MSRValue": "0x0604000004",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -694,12 +695,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.",
- "MSRValue": "0x3fbc000010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3FBC000010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -707,12 +708,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x083FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -720,12 +721,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x103FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -733,12 +734,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x063FC00010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -746,12 +747,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x063B800010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -759,12 +760,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000010 ",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0604000010",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -772,12 +773,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -785,12 +786,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x083FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -798,12 +799,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x103FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -811,12 +812,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x063FC00020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -824,12 +825,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x063B800020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -837,12 +838,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000020 ",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0604000020",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -850,12 +851,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.",
- "MSRValue": "0x3fbc000080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3FBC000080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -863,12 +864,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x083FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -876,12 +877,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x103FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -889,12 +890,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x063FC00080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -902,12 +903,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x063B800080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -915,12 +916,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000080 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0604000080",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -928,12 +929,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -941,12 +942,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x083FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -954,12 +955,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x103FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -967,12 +968,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x063FC00100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -980,12 +981,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x063B800100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -993,12 +994,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000100 ",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0604000100",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1006,12 +1007,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.",
- "MSRValue": "0x3fbc000400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3FBC000400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1019,12 +1020,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x083FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1032,12 +1033,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x103FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1045,12 +1046,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x063FC00400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1058,12 +1059,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x063B800400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1071,90 +1072,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000400 ",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0604000400",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss in the L3.",
- "MSRValue": "0x3fbc008000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc08000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b808000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604008000 ",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1162,12 +1085,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss in the L3.",
- "MSRValue": "0x3fbc000490 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1175,12 +1098,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1188,12 +1111,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1201,12 +1124,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1214,12 +1137,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1227,12 +1150,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000490 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000490",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1240,12 +1163,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000120 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1253,12 +1176,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1266,12 +1189,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1279,12 +1202,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1292,12 +1215,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1305,12 +1228,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000120 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000120",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1318,12 +1241,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.",
- "MSRValue": "0x3fbc000491 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1331,12 +1254,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1344,12 +1267,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1357,12 +1280,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1370,12 +1293,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1383,12 +1306,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000491 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000491",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1396,12 +1319,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.",
- "MSRValue": "0x3fbc000122 ",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1409,12 +1332,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
- "MSRValue": "0x083fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1422,12 +1345,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
- "MSRValue": "0x103fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1435,12 +1358,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
- "MSRValue": "0x063fc00122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063FC00122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1448,12 +1371,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.",
- "MSRValue": "0x063b800122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1461,12 +1384,12 @@
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
- "MSRValue": "0x0604000122 ",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000122",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "PublicDescription": "TBD TBD",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
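
Note on the OFFCORE_RESPONSE.* entries above: the MSRValue field is the request/response/snoop bit pattern for MSR_OFFCORE_RSP (0x1A6/0x1A7), paired with EventCode 0xB7/0xBB and UMask 0x1. A minimal sketch, assuming an Intel core PMU where the Linux x86 perf driver exposes that MSR value as perf_event_attr.config1; the chosen event, the workload placeholder, and the wrapper name are illustrative only:

/*
 * Minimal sketch (assumptions: x86 CPU with offcore-response support,
 * perf_event_open available): count one of the OFFCORE_RESPONSE events
 * listed above by putting EventCode/UMask in attr.config and the
 * MSR 0x1A6/0x1A7 request bits (the "MSRValue" field) in attr.config1.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* EventCode 0xB7, UMask 0x1 (OFFCORE_RESPONSE) */
	attr.config = 0x01B7;
	/* MSRValue for OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP above */
	attr.config1 = 0x3FBC000491ULL;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... workload under measurement would run here ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
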
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index f99f7ae27820..369f56c1d1b5 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -1,6 +1,5 @@
[
{
- "EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
@@ -10,7 +9,6 @@
"CounterHTOff": "Fixed counter 0"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
"Counter": "Fixed counter 1",
@@ -20,7 +18,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "Fixed counter 1",
@@ -30,7 +27,6 @@
"CounterHTOff": "Fixed counter 1"
},
{
- "EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
"Counter": "Fixed counter 2",
@@ -99,24 +95,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0x0E",
"UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0x0E",
"UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -126,7 +122,7 @@
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -203,19 +199,19 @@
{
"EventCode": "0x3C",
"UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
"SampleAfterValue": "2503",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x3C",
"UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2503",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -248,12 +244,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
+ "EventCode": "0x59",
"UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
"Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -271,6 +267,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -361,12 +367,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA2",
+ "EventCode": "0xa2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
"Counter": "0,1,2,3",
"EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "PublicDescription": "Counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -522,6 +528,17 @@
{
"EventCode": "0xA8",
"UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
"EventName": "LSD.CYCLES_ACTIVE",
@@ -531,35 +548,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"CounterMask": "4",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -575,35 +592,24 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xB1",
"UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -618,11 +624,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -630,10 +637,10 @@
{
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -650,20 +657,19 @@
{
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xB1",
"UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
@@ -725,12 +731,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "Counts the retirement slots used.",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -742,19 +750,17 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
"EventCode": "0xC2",
"UMask": "0x2",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "BriefDescription": "Retirement slots used.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -766,6 +772,7 @@
"Counter": "0,1,2,3",
"EventName": "MACHINE_CLEARS.COUNT",
"CounterMask": "1",
+ "PublicDescription": "Number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,11 +848,12 @@
{
"EventCode": "0xC4",
"UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
+ "BriefDescription": "Counts all not taken macro branch instructions retired.",
+ "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -919,7 +927,7 @@
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
@@ -938,6 +946,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xCC",
+ "UMask": "0x40",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xE6",
"UMask": "0x1",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 71e9737f4614..56e03ba771f4 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1,164 +1,394 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound"
+ },
+ {
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation"
+ },
+ {
+ "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Bad_Speculation_SMT"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound"
+ },
+ {
+ "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Backend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
+ "MetricGroup": "TopdownL1_SMT",
+ "MetricName": "Retiring_SMT"
+ },
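
The four TopdownL1 MetricExpr strings above reduce to simple slot accounting when SMT is off (SLOTS = 4 * CPU_CLK_UNHALTED.THREAD). A minimal sketch of that arithmetic, assuming the counter values have already been collected by a tool such as perf; the struct layout and sample numbers are hypothetical:

/*
 * Minimal sketch of the non-SMT Top-Down level-1 arithmetic encoded in
 * the MetricExpr strings above.  Counter names in the comments match the
 * events used by those expressions.
 */
#include <stdio.h>
#include <stdint.h>

struct topdown_counts {
	uint64_t cycles;                 /* CPU_CLK_UNHALTED.THREAD     */
	uint64_t idq_uops_not_delivered; /* IDQ_UOPS_NOT_DELIVERED.CORE */
	uint64_t uops_issued;            /* UOPS_ISSUED.ANY             */
	uint64_t uops_retired_slots;     /* UOPS_RETIRED.RETIRE_SLOTS   */
	uint64_t recovery_cycles;        /* INT_MISC.RECOVERY_CYCLES    */
};

static void topdown_l1(const struct topdown_counts *c)
{
	double slots = 4.0 * c->cycles;
	double fe    = c->idq_uops_not_delivered / slots;
	double bad   = (c->uops_issued - c->uops_retired_slots +
			4.0 * c->recovery_cycles) / slots;
	double ret   = c->uops_retired_slots / slots;
	double be    = 1.0 - (fe + bad + ret);

	printf("Frontend_Bound  %.3f\n", fe);
	printf("Bad_Speculation %.3f\n", bad);
	printf("Backend_Bound   %.3f\n", be);
	printf("Retiring        %.3f\n", ret);
}

int main(void)
{
	/* Hypothetical sample values, for illustration only. */
	struct topdown_counts c = {
		.cycles = 1000000,
		.idq_uops_not_delivered = 800000,
		.uops_issued = 2600000,
		.uops_retired_slots = 2400000,
		.recovery_cycles = 25000,
	};
	topdown_l1(&c);
	return 0;
}
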
+ {
"MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
"MetricGroup": "TopDownL1",
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retiring",
"MetricName": "UPI"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
- "MetricGroup": "Frontend",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTB"
+ },
+ {
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
+ "MetricGroup": "PGO",
"MetricName": "IFetch_Line_Utilization"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
"MetricName": "DSB_Coverage"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "BriefDescription": "Cycles Per Instruction (threaded)",
"MetricGroup": "Pipeline;Summary",
"MetricName": "CPI"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active.",
"MetricGroup": "Summary",
"MetricName": "CLKS"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricExpr": "4 * cycles",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
"MetricGroup": "TopDownL1",
"MetricName": "SLOTS"
},
{
- "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Total issue-pipeline slots (per core)",
+ "MetricGroup": "TopDownL1_SMT",
+ "MetricName": "SLOTS_SMT"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means loads are more frequent)",
+ "MetricGroup": "Instruction_Type;L1_Bound",
+ "MetricName": "IpL"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store",
+ "MetricGroup": "Instruction_Type;Store_Bound",
+ "MetricName": "IpS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch",
+ "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6",
+ "MetricName": "IpB"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instruction per (near) call",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
"MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
"MetricGroup": "Summary",
"MetricName": "Instructions"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
"BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
"MetricGroup": "SMT",
"MetricName": "CoreIPC"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC_SMT"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "FLOPS_SMT",
+ "MetricName": "FLOPc_SMT"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
"MetricGroup": "Pipeline;Ports_Utilization",
"MetricName": "ILP"
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "Branch_Misprediction_Cost"
+ },
+ {
+ "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)",
+ "MetricGroup": "Branch_Mispredicts_SMT",
+ "MetricName": "Branch_Misprediction_Cost_SMT"
},
{
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "Branch_Mispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
"BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
"MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)",
"MetricGroup": "Memory_Bound;Memory_BW",
"MetricName": "MLP"
},
{
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
"MetricGroup": "TLB",
"MetricName": "Page_Walks_Utilization"
},
{
- "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "TLB_SMT",
+ "MetricName": "Page_Walks_Utilization_SMT"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "Cache_Misses;",
+ "MetricName": "L3MPKI"
+ },
+ {
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
"MetricGroup": "Summary",
"MetricName": "CPU_Utilization"
},
{
+ "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
"BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
"MetricGroup": "FLOPS;Summary",
"MetricName": "GFLOPs"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
"MetricGroup": "Power",
"MetricName": "Turbo_Utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
"MetricGroup": "SMT;Summary",
"MetricName": "SMT_2T_Utilization"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
"MetricGroup": "Summary",
"MetricName": "Kernel_Utilization"
},
{
- "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "DRAM_Read_Latency"
+ },
+ {
+ "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "DRAM_Parallel_Reads"
+ },
+ {
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricGroup": "Memory_Lat",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricGroup": "Memory_BW",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricGroup": "",
+ "MetricName": "Socket_CLKS"
+ },
+ {
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per core",
"MetricName": "C3_Core_Residency"
},
{
- "BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per core",
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per core",
"MetricName": "C7_Core_Residency"
},
{
- "BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C2 residency percent per package",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C3 residency percent per package",
"MetricName": "C3_Pkg_Residency"
},
{
- "BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C6 residency percent per package",
"MetricName": "C6_Pkg_Residency"
},
{
- "BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
+ "BriefDescription": "C7 residency percent per package",
"MetricName": "C7_Pkg_Residency"
}
]
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 68c92bb599ee..58f77fd0f59f 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -235,6 +235,7 @@ static struct map {
{ "iMPH-U", "uncore_arb" },
{ "CPU-M-CF", "cpum_cf" },
{ "CPU-M-SF", "cpum_sf" },
+ { "UPI LL", "uncore_upi" },
{}
};
@@ -414,7 +415,6 @@ static int save_arch_std_events(void *data, char *name, char *event,
char *metric_name, char *metric_group)
{
struct event_struct *es;
- struct stat *sb = data;
es = malloc(sizeof(*es));
if (!es)
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
index 0a29c5c3079f..ff87ccf5b708 100755
--- a/tools/perf/python/twatch.py
+++ b/tools/perf/python/twatch.py
@@ -1,17 +1,10 @@
#! /usr/bin/python
+# SPDX-License-Identifier: GPL-2.0-only
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
-# This application is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2.
-#
-# This application is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
import perf
diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
index 41efd7e368b3..68d4b54574ad 100644
--- a/tools/perf/scripts/Build
+++ b/tools/perf/scripts/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
-libperf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+perf-$(CONFIG_LIBPERL) += perl/Perf-Trace-Util/
+perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
index 34faecf774ae..db0036129307 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
@@ -1,4 +1,4 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index 28431d1bbcf5..ead521dd8d79 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the
* contents of Context.xs. Do not edit this file, edit Context.xs instead.
*
* ANY CHANGES MADE HERE WILL BE LOST!
- *
*/
#include <stdbool.h>
#ifndef HAS_BOOL
@@ -14,21 +14,6 @@
* Context.xs. XS interfaces for perf script.
*
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include "EXTERN.h"
diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl
index 74844ee2be3e..168fa5e94b44 100644
--- a/tools/perf/scripts/perl/rw-by-file.pl
+++ b/tools/perf/scripts/perl/rw-by-file.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
-# Licensed under the terms of the GNU GPL License version 2
# Display r/w activity for files read/written to for a given program
diff --git a/tools/perf/scripts/perl/rw-by-pid.pl b/tools/perf/scripts/perl/rw-by-pid.pl
index 9db23c9daf55..495698250b2f 100644
--- a/tools/perf/scripts/perl/rw-by-pid.pl
+++ b/tools/perf/scripts/perl/rw-by-pid.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
-# Licensed under the terms of the GNU GPL License version 2
# Display r/w activity for all processes
diff --git a/tools/perf/scripts/perl/rwtop.pl b/tools/perf/scripts/perl/rwtop.pl
index 8b20787021c1..6473442568a2 100644
--- a/tools/perf/scripts/perl/rwtop.pl
+++ b/tools/perf/scripts/perl/rwtop.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
-# Licensed under the terms of the GNU GPL License version 2
# read/write top
#
diff --git a/tools/perf/scripts/perl/wakeup-latency.pl b/tools/perf/scripts/perl/wakeup-latency.pl
index d9143dcec6c6..efcfec5e347a 100644
--- a/tools/perf/scripts/perl/wakeup-latency.pl
+++ b/tools/perf/scripts/perl/wakeup-latency.pl
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
-# Licensed under the terms of the GNU GPL License version 2
# Display avg/min/max wakeup latency
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
index aefc15c9444a..7d0e33ce6aba 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
@@ -1,3 +1,3 @@
-libperf-y += Context.o
+perf-y += Context.o
CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 1a0d27757eec..217568bc29ce 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Context.c. Python interfaces for perf script.
*
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <Python.h>
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
@@ -7,6 +7,8 @@
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
+from __future__ import print_function
+
import os
import sys
@@ -19,64 +21,64 @@ from perf_trace_context import *
unhandled = autodict()
def trace_begin():
- print "trace_begin"
+ print("trace_begin")
pass
def trace_end():
- print_unhandled()
+ print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, vec):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, vec):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "vec=%s\n" % \
- (symbol_str("irq__softirq_entry", "vec", vec)),
+ print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec)))
def kmem__kmalloc(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, call_site, ptr, bytes_req, bytes_alloc,
- gfp_flags):
- print_header(event_name, common_cpu, common_secs, common_nsecs,
- common_pid, common_comm)
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, call_site, ptr, bytes_req, bytes_alloc,
+ gfp_flags):
+ print_header(event_name, common_cpu, common_secs, common_nsecs,
+ common_pid, common_comm)
- print_uncommon(context)
+ print_uncommon(context)
- print "call_site=%u, ptr=%u, bytes_req=%u, " \
- "bytes_alloc=%u, gfp_flags=%s\n" % \
+ print("call_site=%u, ptr=%u, bytes_req=%u, "
+ "bytes_alloc=%u, gfp_flags=%s" %
(call_site, ptr, bytes_req, bytes_alloc,
-
- flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
+ flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)))
def trace_unhandled(event_name, context, event_fields_dict):
- try:
- unhandled[event_name] += 1
- except TypeError:
- unhandled[event_name] = 1
+ try:
+ unhandled[event_name] += 1
+ except TypeError:
+ unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
- print "%-20s %5u %05u.%09u %8u %-20s " % \
- (event_name, cpu, secs, nsecs, pid, comm),
+ print("%-20s %5u %05u.%09u %8u %-20s " %
+ (event_name, cpu, secs, nsecs, pid, comm),
+ end=' ')
# print trace fields not included in handler args
def print_uncommon(context):
- print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
- % (common_pc(context), trace_flag_str(common_flags(context)), \
- common_lock_depth(context))
+ print("common_preempt_count=%d, common_flags=%s, "
+ "common_lock_depth=%d, " %
+ (common_pc(context), trace_flag_str(common_flags(context)),
+ common_lock_depth(context)))
def print_unhandled():
- keys = unhandled.keys()
- if not keys:
- return
+ keys = unhandled.keys()
+ if not keys:
+ return
- print "\nunhandled events:\n\n",
+ print("\nunhandled events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
- for event_name in keys:
- print "%-40s %10d\n" % (event_name, unhandled[event_name])
+ for event_name in keys:
+ print("%-40s %10d\n" % (event_name, unhandled[event_name]))
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -15,6 +15,8 @@
# for a x86 HW PMU event: PEBS with load latency data.
#
+from __future__ import print_function
+
import os
import sys
import math
@@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
- print "In trace_begin:\n"
+ print("In trace_begin:\n")
#
# Will create several tables at the start, pebs_ll is for PEBS data with
@@ -76,12 +78,12 @@ def process_event(param_dict):
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
+ if ("dso" in param_dict):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
- if (param_dict.has_key("symbol")):
+ if ("symbol" in param_dict):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
@@ -102,7 +104,7 @@ def insert_db(event):
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
- print "In trace_end:\n"
+ print("In trace_end:\n")
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
@@ -123,29 +125,29 @@ def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
- print "There is %d records in gen_events table" % t[0]
+ print("There is %d records in gen_events table" % t[0])
if t[0] == 0:
return
- print "Statistics about the general events grouped by thread/symbol/dso: \n"
+ print("Statistics about the general events grouped by thread/symbol/dso: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dso
- print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
+ print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
- print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
#
# This function just shows the basic info, and we could do more with the
@@ -156,35 +158,35 @@ def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
- print "There is %d records in pebs_ll table" % t[0]
+ print("There is %d records in pebs_ll table" % t[0])
if t[0] == 0:
return
- print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
+ print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
- print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
+ print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
for row in commq:
- print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by symbol
- print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
- print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
for row in dseq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
- print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
+ print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
for row in latq:
- print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
+ print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 0564dd7377f2..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -199,6 +201,18 @@ import datetime
from PySide.QtSql import *
+if sys.version_info < (3, 0):
+ def toserverstr(str):
+ return str
+ def toclientstr(str):
+ return str
+else:
+ # Assume UTF-8 server_encoding and client_encoding
+ def toserverstr(str):
+ return bytes(str, "UTF_8")
+ def toclientstr(str):
+ return bytes(str, "UTF_8")
+
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **kw_args):
+ print(*args, file=sys.stderr, **kw_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
def usage():
- print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
+ printerr("where: columns 'all' or 'branches'")
+ printerr(" calls 'calls' => create calls and call_paths table")
+ printerr(" callchains 'callchains' => create call_paths table")
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database...")
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
@@ -394,7 +413,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
@@ -478,8 +498,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -504,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
' FROM samples')
-file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
-file_trailer = "\377\377"
+file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
+file_trailer = b"\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
- file = open(path_name, "w+")
+ file = open(path_name, "wb+")
file.write(file_header)
return file
@@ -524,13 +545,13 @@ def copy_output_file_direct(file, table_name):
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
- conn = PQconnectdb("dbname = " + dbname)
+ conn = PQconnectdb(toclientstr("dbname = " + dbname))
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
- res = PQexec(conn, sql)
+ res = PQexec(conn, toclientstr(sql))
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
@@ -564,7 +585,7 @@ if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
- print datetime.datetime.today(), "Writing to intermediate files..."
+ printdate("Writing to intermediate files...")
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
@@ -575,11 +596,12 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
- print datetime.datetime.today(), "Copying to database..."
+ printdate("Copying to database...")
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
@@ -594,7 +616,7 @@ def trace_end():
if perf_db_export_calls:
copy_output_file(call_file, "calls")
- print datetime.datetime.today(), "Removing intermediate files..."
+ printdate("Removing intermediate files...")
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
@@ -609,7 +631,7 @@ def trace_end():
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
- print datetime.datetime.today(), "Adding primary keys"
+ printdate("Adding primary keys")
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -624,7 +646,7 @@ def trace_end():
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
- print datetime.datetime.today(), "Adding foreign keys"
+ printdate("Adding foreign keys")
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -657,10 +679,11 @@ def trace_end():
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -670,12 +693,14 @@ def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
+ evsel_name = toserverstr(evsel_name)
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
+ root_dir = toserverstr(root_dir)
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -686,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
+ comm_str = toserverstr(comm_str)
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -697,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
+ short_name = toserverstr(short_name)
+ long_name = toserverstr(long_name)
+ build_id = toserverstr(build_id)
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
@@ -705,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
+ symbol_name = toserverstr(symbol_name)
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
+ name = toserverstr(name)
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
@@ -728,7 +759,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
-def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
- fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
- value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
+def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x):
+ fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq"
+ value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id)
call_file.write(value)
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index 245caf2643ed..bf271fbc3a88 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
+def printdate(*args, **kw_args):
+ print(datetime.datetime.today(), *args, sep=' ', **kw_args)
+
def usage():
- print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
- print >> sys.stderr, "where: columns 'all' or 'branches'"
- print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
- print >> sys.stderr, " callchains 'callchains' => create call_paths table"
+ printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
+ printerr("where: columns 'all' or 'branches'");
+ printerr(" calls 'calls' => create calls and call_paths table");
+ printerr(" callchains 'callchains' => create call_paths table");
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
return
raise Exception("Query failed: " + q.lastError().text())
-print datetime.datetime.today(), "Creating database..."
+printdate("Creating database ...")
db_exists = False
try:
@@ -222,7 +230,8 @@ if perf_db_export_calls:
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
- 'flags integer)')
+ 'flags integer,'
+ 'parent_id bigint)')
# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
@@ -320,8 +329,9 @@ if perf_db_export_calls:
'branch_count,'
'call_id,'
'return_id,'
- 'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
- 'parent_call_path_id'
+ 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
+ 'parent_call_path_id,'
+ 'calls.parent_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
@@ -373,10 +383,10 @@ if perf_db_export_calls or perf_db_export_callchains:
call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
call_query = QSqlQuery(db)
- call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+ call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
def trace_begin():
- print datetime.datetime.today(), "Writing records..."
+ printdate("Writing records...")
do_query(query, 'BEGIN TRANSACTION')
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
@@ -388,19 +398,21 @@ def trace_begin():
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
+ call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
unhandled_count = 0
def trace_end():
do_query(query, 'END TRANSACTION')
- print datetime.datetime.today(), "Adding indexes"
+ printdate("Adding indexes")
if perf_db_export_calls:
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+ do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
if (unhandled_count):
- print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
- print datetime.datetime.today(), "Done"
+ printdate("Warning: ", unhandled_count, " unhandled events")
+ printdate("Done")
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
@@ -452,4 +464,4 @@ def call_path_table(*x):
bind_exec(call_path_query, 4, x)
def call_return_table(*x):
- bind_exec(call_query, 11, x)
+ bind_exec(call_query, 12, x)
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index f278ce5ebab7..affed7d149be 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python2
+#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
@@ -88,20 +88,39 @@
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+from __future__ import print_function
+
import sys
import weakref
import threading
import string
-import cPickle
+try:
+ # Python2
+ import cPickle as pickle
+ # size of pickled integer big enough for record size
+ glb_nsz = 8
+except ImportError:
+ import pickle
+ glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
+pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
+# xrange is range in Python3
+try:
+ xrange
+except NameError:
+ xrange = range
+
+def printerr(*args, **keyword_args):
+ print(*args, file=sys.stderr, **keyword_args)
+
# Data formatting helpers
def tohex(ip):
@@ -167,9 +186,10 @@ class Thread(QThread):
class TreeModel(QAbstractItemModel):
- def __init__(self, root, parent=None):
+ def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
- self.root = root
+ self.glb = glb
+ self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
@@ -436,6 +456,10 @@ class CallGraphLevelItemBase(object):
self.query_done = False;
self.child_count = 0
self.child_items = []
+ if parent_item:
+ self.level = parent_item.level + 1
+ else:
+ self.level = 0
def getChildItem(self, row):
return self.child_items[row]
@@ -557,24 +581,12 @@ class CallGraphRootItem(CallGraphLevelItemBase):
self.child_items.append(child_item)
self.child_count += 1
-# Context-sensitive call graph data model
+# Context-sensitive call graph data model base
-class CallGraphModel(TreeModel):
+class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
- super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
- self.glb = glb
-
- def columnCount(self, parent=None):
- return 7
-
- def columnHeader(self, column):
- headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
- return headers[column]
-
- def columnAlignment(self, column):
- alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
- return alignment[column]
+ super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
@@ -594,34 +606,7 @@ class CallGraphModel(TreeModel):
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
- QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
- " FROM calls"
- " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
- " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
- " WHERE symbols.name" + match +
- " GROUP BY comm_id, thread_id, call_path_id"
- " ORDER BY comm_id, thread_id, call_path_id")
-
- def FindPath(self, query):
- # Turn the query result into a list of ids that the tree view can walk
- # to open the tree at the right place.
- ids = []
- parent_id = query.value(0)
- while parent_id:
- ids.insert(0, parent_id)
- q2 = QSqlQuery(self.glb.db)
- QueryExec(q2, "SELECT parent_id"
- " FROM call_paths"
- " WHERE id = " + str(parent_id))
- if not q2.next():
- break
- parent_id = q2.value(0)
- # The call path root is not used
- if ids[0] == 1:
- del ids[0]
- ids.insert(0, query.value(2))
- ids.insert(0, query.value(1))
- return ids
+ self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
@@ -675,6 +660,201 @@ class CallGraphModel(TreeModel):
def FindDone(self, thread, callback, ids):
callback(ids)
+# Context-sensitive call graph data model
+
+class CallGraphModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallGraphRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " GROUP BY comm_id, thread_id, call_path_id"
+ " ORDER BY comm_id, thread_id, call_path_id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM call_paths"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ # The call path root is not used
+ if ids[0] == 1:
+ del ids[0]
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
+# Call tree data model level 2+ item base
+
+class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
+ super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+ self.comm_id = comm_id
+ self.thread_id = thread_id
+ self.calls_id = calls_id
+ self.branch_count = branch_count
+ self.time = time
+
+ def Select(self):
+ self.query_done = True;
+ if self.calls_id == 0:
+ comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
+ else:
+ comm_thread = ""
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
+ " ORDER BY call_time, calls.id")
+ while query.next():
+ child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model level three item
+
+class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
+ super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
+ dso = dsoname(dso)
+ self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+ self.dbid = calls_id
+
+# Call tree data model level two item
+
+class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
+
+ def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+ super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
+ self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+ self.dbid = thread_id
+
+ def Select(self):
+ super(CallTreeLevelTwoItem, self).Select()
+ for child_item in self.child_items:
+ self.time += child_item.time
+ self.branch_count += child_item.branch_count
+ for child_item in self.child_items:
+ child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+ child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Call tree data model level one item
+
+class CallTreeLevelOneItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb, row, comm_id, comm, parent_item):
+ super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
+ self.data = [comm, "", "", "", "", "", ""]
+ self.dbid = comm_id
+
+ def Select(self):
+ self.query_done = True;
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT thread_id, pid, tid"
+ " FROM comm_threads"
+ " INNER JOIN threads ON thread_id = threads.id"
+ " WHERE comm_id = " + str(self.dbid))
+ while query.next():
+ child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call tree data model root item
+
+class CallTreeRootItem(CallGraphLevelItemBase):
+
+ def __init__(self, glb):
+ super(CallTreeRootItem, self).__init__(glb, 0, None)
+ self.dbid = 0
+ self.query_done = True;
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms")
+ while query.next():
+ if not query.value(0):
+ continue
+ child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+ self.child_items.append(child_item)
+ self.child_count += 1
+
+# Call Tree data model
+
+class CallTreeModel(CallGraphModelBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeModel, self).__init__(glb, parent)
+
+ def GetRoot(self):
+ return CallTreeRootItem(self.glb)
+
+ def columnCount(self, parent=None):
+ return 7
+
+ def columnHeader(self, column):
+ headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+ return headers[column]
+
+ def columnAlignment(self, column):
+ alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+ return alignment[column]
+
+ def DoFindSelect(self, query, match):
+ QueryExec(query, "SELECT calls.id, comm_id, thread_id"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " WHERE symbols.name" + match +
+ " ORDER BY comm_id, thread_id, call_time, calls.id")
+
+ def FindPath(self, query):
+ # Turn the query result into a list of ids that the tree view can walk
+ # to open the tree at the right place.
+ ids = []
+ parent_id = query.value(0)
+ while parent_id:
+ ids.insert(0, parent_id)
+ q2 = QSqlQuery(self.glb.db)
+ QueryExec(q2, "SELECT parent_id"
+ " FROM calls"
+ " WHERE id = " + str(parent_id))
+ if not q2.next():
+ break
+ parent_id = q2.value(0)
+ ids.insert(0, query.value(2))
+ ids.insert(0, query.value(1))
+ return ids
+
# Vertical widget layout
class VBox():
@@ -693,28 +873,21 @@ class VBox():
def Widget(self):
return self.vbox
-# Context-sensitive call graph window
+# Tree window base
-class CallGraphWindow(QMdiSubWindow):
+class TreeWindowBase(QMdiSubWindow):
- def __init__(self, glb, parent=None):
- super(CallGraphWindow, self).__init__(parent)
+ def __init__(self, parent=None):
+ super(TreeWindowBase, self).__init__(parent)
- self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+ self.model = None
+ self.find_bar = None
self.view = QTreeView()
- self.view.setModel(self.model)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
- for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
- self.view.setColumnWidth(c, w)
-
- self.find_bar = FindBar(self, self)
-
- self.vbox = VBox(self.view, self.find_bar.Widget())
-
- self.setWidget(self.vbox.Widget())
-
- AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+ self.context_menu = TreeContextMenu(self.view)
def DisplayFound(self, ids):
if not len(ids):
@@ -747,6 +920,51 @@ class CallGraphWindow(QMdiSubWindow):
if not found:
self.find_bar.NotFound()
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallGraphWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+# Call tree window
+
+class CallTreeWindow(TreeWindowBase):
+
+ def __init__(self, glb, parent=None):
+ super(CallTreeWindow, self).__init__(parent)
+
+ self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
+
+ self.view.setModel(self.model)
+
+ for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
+ self.view.setColumnWidth(c, w)
+
+ self.find_bar = FindBar(self, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
+
# Child data item finder
class ChildDataItemFinder():
@@ -812,10 +1030,6 @@ class ChildDataItemFinder():
glb_chunk_sz = 10000
-# size of pickled integer big enough for record size
-
-glb_nsz = 8
-
# Background process for SQL data fetcher
class SQLFetcherProcess():
@@ -874,7 +1088,7 @@ class SQLFetcherProcess():
return True
if space >= glb_nsz:
# Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
- nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
@@ -892,9 +1106,9 @@ class SQLFetcherProcess():
self.wait_event.wait()
def AddToBuffer(self, obj):
- d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+ d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
- nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+ nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
@@ -1006,12 +1220,12 @@ class SQLFetcher(QObject):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
- n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+ n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
- n = cPickle.loads(self.buffer[0 : glb_nsz])
+ n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
- obj = cPickle.loads(self.buffer[pos : pos + n])
+ obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
@@ -1320,6 +1534,19 @@ def BranchDataPrep(query):
" (" + dsoname(query.value(15)) + ")")
return data
+def BranchDataPrepWA(query):
+ data = []
+ data.append(query.value(0))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, 8):
+ data.append(query.value(i))
+ data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+ " (" + dsoname(query.value(11)) + ")" + " -> " +
+ tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+ " (" + dsoname(query.value(15)) + ")")
+ return data
+
# Branch data model
class BranchModel(TreeModel):
@@ -1327,8 +1554,7 @@ class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
- super(BranchModel, self).__init__(BranchRootItem(), parent)
- self.glb = glb
+ super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
@@ -1348,10 +1574,17 @@ class BranchModel(TreeModel):
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
- self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+ if pyside_version_1 and sys.version_info[0] == 3:
+ prep = BranchDataPrepWA
+ else:
+ prep = BranchDataPrep
+ self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
+ def GetRoot(self):
+ return BranchRootItem()
+
def columnCount(self, parent=None):
return 8
@@ -1398,25 +1631,39 @@ class BranchModel(TreeModel):
def HasMoreRecords(self):
return self.more
+# Report Variables
+
+class ReportVars():
+
+ def __init__(self, name = "", where_clause = "", limit = ""):
+ self.name = name
+ self.where_clause = where_clause
+ self.limit = limit
+
+ def UniqueId(self):
+ return str(self.where_clause + ";" + self.limit)
+
# Branch window
class BranchWindow(QMdiSubWindow):
- def __init__(self, glb, event_id, name, where_clause, parent=None):
+ def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
- model_name = "Branch Events " + str(event_id)
- if len(where_clause):
- model_name = where_clause + " " + model_name
+ model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
- self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause))
+ self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTreeCellsToClipboard
self.view.setModel(self.model)
self.ResizeColumnsToContents()
+ self.context_menu = TreeContextMenu(self.view)
+
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model.root)
@@ -1427,7 +1674,7 @@ class BranchWindow(QMdiSubWindow):
self.setWidget(self.vbox.Widget())
- AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events")
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
 		# Using the view's resizeColumnToContents() here is extremely slow
@@ -1472,47 +1719,134 @@ class BranchWindow(QMdiSubWindow):
else:
self.find_bar.NotFound()
-# Dialog data item converted and validated using a SQL table
+# Line edit data item
-class SQLTableDialogDataItem():
+class LineEditDataItem(object):
- def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
- self.table_name = table_name
- self.match_column = match_column
- self.column_name1 = column_name1
- self.column_name2 = column_name2
self.parent = parent
+ self.id = id
- self.value = ""
+ self.value = default
- self.widget = QLineEdit()
+ self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
- self.last_id = 0
- self.first_time = 0
- self.last_time = 2 ** 64
- if self.table_name == "<timeranges>":
- query = QSqlQuery(self.glb.db)
- QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
- if query.next():
- self.last_id = int(query.value(0))
- self.last_time = int(query.value(1))
- QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
- if query.next():
- self.first_time = int(query.value(0))
- if placeholder_text:
- placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
-
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
+ def TurnTextRed(self):
+ if not self.red:
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+
+ def TurnTextNormal(self):
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ self.TurnTextRed()
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
+ def Invalidate(self):
+ self.validated = False
+
+ def DoValidate(self, input_string):
+ self.value = input_string.strip()
+
+ def Validate(self):
+ self.validated = True
+ self.error = ""
+ self.TurnTextNormal()
+ self.parent.ClearMessage()
+ input_string = self.widget.text()
+ if not len(input_string.strip()):
+ self.value = ""
+ return
+ self.DoValidate(input_string)
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except:
+ x = 0
+ return str(x) == value
+
+# Non-negative integer ranges dialog data item
+
+class NonNegativeIntegerRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.column_name = column_name
+
+ def DoValidate(self, input_string):
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+
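
As an illustration (not part of the patch), entering "0,5-6" for the CPUs item, where column_name is "cpu", makes DoValidate() build the following clause:

# Sketch of what DoValidate("0,5-6") produces when column_name is "cpu"
singles = ["0"]
ranges = [["5", "6"]]
clauses = ["(cpu >= " + r[0] + " AND cpu <= " + r[1] + ")" for r in ranges]
clauses.append("cpu IN (" + ",".join(singles) + ")")
print(" OR ".join(clauses))   # (cpu >= 5 AND cpu <= 6) OR cpu IN (0)
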
+# Positive integer dialog data item
+
+class PositiveIntegerDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
+ super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
+
+ def DoValidate(self, input_string):
+ if not self.IsNumber(input_string.strip()):
+ return self.InvalidValue(input_string)
+ value = int(input_string.strip())
+ if value <= 0:
+ return self.InvalidValue(input_string)
+ self.value = str(value)
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
+
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
@@ -1523,6 +1857,42 @@ class SQLTableDialogDataItem():
ids.append(str(query.value(0)))
return ids
+ def DoValidate(self, input_string):
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+
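
For illustration (not part of the patch), if the DSOs item maps an entered name to id 3 via ValueToIds(), with column_name1 = "samples.dso_id" and column_name2 = "to_dso_id", DoValidate() ends up with:

all_ids = ["3"]
value = "samples.dso_id IN (" + ",".join(all_ids) + ")"
value = "( " + value + " OR to_dso_id IN (" + ",".join(all_ids) + ") )"
print(value)   # ( samples.dso_id IN (3) OR to_dso_id IN (3) )
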
+# Sample time ranges dialog data item converted and validated using 'samples' SQL table
+
+class SampleTimeRangesDataItem(LineEditDataItem):
+
+ def __init__(self, glb, label, placeholder_text, column_name, parent):
+ self.column_name = column_name
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
+
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
@@ -1560,7 +1930,6 @@ class SQLTableDialogDataItem():
return str(lower_id)
def ConvertRelativeTime(self, val):
- print "val ", val
mult = 1
suffix = val[-2:]
if suffix == "ms":
@@ -1582,29 +1951,23 @@ class SQLTableDialogDataItem():
return str(val)
def ConvertTimeRange(self, vrange):
- print "vrange ", vrange
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
- print "vrange2 ", vrange
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
- print "ok1"
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
- print "ok2"
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
- print "vrange3 ", vrange
return True
def AddTimeRange(self, value, ranges):
- print "value ", value
n = value.count("-")
if n == 1:
pass
@@ -1622,111 +1985,31 @@ class SQLTableDialogDataItem():
return True
return False
- def InvalidValue(self, value):
- self.value = ""
- palette = QPalette()
- palette.setColor(QPalette.Text,Qt.red)
- self.widget.setPalette(palette)
- self.red = True
- self.error = self.label + " invalid value '" + value + "'"
- self.parent.ShowMessage(self.error)
-
- def IsNumber(self, value):
- try:
- x = int(value)
- except:
- x = 0
- return str(x) == value
-
- def Invalidate(self):
- self.validated = False
-
- def Validate(self):
- input_string = self.widget.text()
- self.validated = True
- if self.red:
- palette = QPalette()
- self.widget.setPalette(palette)
- self.red = False
- if not len(input_string.strip()):
- self.error = ""
- self.value = ""
- return
- if self.table_name == "<timeranges>":
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if not self.AddTimeRange(value, ranges):
- return self.InvalidValue(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- self.value = " OR ".join(ranges)
- elif self.table_name == "<ranges>":
- singles = []
- ranges = []
- for value in [x.strip() for x in input_string.split(",")]:
- if "-" in value:
- vrange = value.split("-")
- if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
- return self.InvalidValue(value)
- ranges.append(vrange)
- else:
- if not self.IsNumber(value):
- return self.InvalidValue(value)
- singles.append(value)
- ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
- if len(singles):
- ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
- self.value = " OR ".join(ranges)
- elif self.table_name:
- all_ids = []
- for value in [x.strip() for x in input_string.split(",")]:
- ids = self.ValueToIds(value)
- if len(ids):
- all_ids.extend(ids)
- else:
- return self.InvalidValue(value)
- self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
- if self.column_name2:
- self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
- else:
- self.value = input_string.strip()
- self.error = ""
- self.parent.ClearMessage()
-
- def IsValid(self):
- if not self.validated:
- self.Validate()
- if len(self.error):
- self.parent.ShowMessage(self.error)
- return False
- return True
+ def DoValidate(self, input_string):
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
-# Selected branch report creation dialog
+# Report Dialog Base
-class SelectedBranchDialog(QDialog):
+class ReportDialogBase(QDialog):
- def __init__(self, glb, parent=None):
- super(SelectedBranchDialog, self).__init__(parent)
+ def __init__(self, glb, title, items, partial, parent=None):
+ super(ReportDialogBase, self).__init__(parent)
self.glb = glb
- self.name = ""
- self.where_clause = ""
+ self.report_vars = ReportVars()
- self.setWindowTitle("Selected Branches")
+ self.setWindowTitle(title)
self.setMinimumWidth(600)
- items = (
- ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
- ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
- ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
- ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
- ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
- ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
- ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
- ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
- ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
- )
- self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+ self.data_items = [x(glb, self) for x in items]
+
+ self.partial = partial
self.grid = QGridLayout()
@@ -1758,23 +2041,28 @@ class SelectedBranchDialog(QDialog):
self.setLayout(self.vbox);
def Ok(self):
- self.name = self.data_items[0].value
- if not self.name:
+ vars = self.report_vars
+ for d in self.data_items:
+ if d.id == "REPORTNAME":
+ vars.name = d.value
+ if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
- if len(d.value):
- if len(self.where_clause):
- self.where_clause += " AND "
- self.where_clause += d.value
- if len(self.where_clause):
- self.where_clause = " AND ( " + self.where_clause + " ) "
- else:
- self.ShowMessage("No selection")
- return
+ if d.id == "LIMIT":
+ vars.limit = d.value
+ elif len(d.value):
+ if len(vars.where_clause):
+ vars.where_clause += " AND "
+ vars.where_clause += d.value
+ if len(vars.where_clause):
+ if self.partial:
+ vars.where_clause = " AND ( " + vars.where_clause + " ) "
+ else:
+ vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
@@ -1783,6 +2071,23 @@ class SelectedBranchDialog(QDialog):
def ClearMessage(self):
self.status.setText("")
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Selected Branches"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
+ lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
+ super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
+
# Event list
def GetEventList(db):
@@ -1793,13 +2098,15 @@ def GetEventList(db):
events.append(query.value(0))
return events
-# SQL data preparation
+# Is a table selectable
-def SQLTableDataPrep(query, count):
- data = []
- for i in xrange(count):
- data.append(query.value(i))
- return data
+def IsSelectable(db, table, sql = ""):
+ query = QSqlQuery(db)
+ try:
+ QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
+ except:
+ return False
+ return True
# SQL table data model item
@@ -1818,12 +2125,13 @@ class SQLTableModel(TableModel):
progress = Signal(object)
- def __init__(self, glb, sql, column_count, parent=None):
+ def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
- self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample)
+ self.column_headers = column_headers
+ self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
@@ -1861,6 +2169,18 @@ class SQLTableModel(TableModel):
def HasMoreRecords(self):
return self.more
+ def columnCount(self, parent=None):
+ return len(self.column_headers)
+
+ def columnHeader(self, column):
+ return self.column_headers[column]
+
+ def SQLTableDataPrep(self, query, count):
+ data = []
+ for i in xrange(count):
+ data.append(query.value(i))
+ return data
+
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
@@ -1870,12 +2190,12 @@ class SQLAutoTableModel(SQLTableModel):
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
- self.column_headers = []
+ column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
- self.column_headers.append(query.value(1))
+ column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
@@ -1888,14 +2208,32 @@ class SQLAutoTableModel(SQLTableModel):
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
- self.column_headers.append(query.value(0))
- super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent)
-
- def columnCount(self, parent=None):
- return len(self.column_headers)
-
- def columnHeader(self, column):
- return self.column_headers[column]
+ column_headers.append(query.value(0))
+ if pyside_version_1 and sys.version_info[0] == 3:
+ if table_name == "samples_view":
+ self.SQLTableDataPrep = self.samples_view_DataPrep
+ if table_name == "samples":
+ self.SQLTableDataPrep = self.samples_DataPrep
+ super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
+
+ def samples_view_DataPrep(self, query, count):
+ data = []
+ data.append(query.value(0))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(1)))
+ for i in xrange(2, count):
+ data.append(query.value(i))
+ return data
+
+ def samples_DataPrep(self, query, count):
+ data = []
+ for i in xrange(9):
+ data.append(query.value(i))
+ # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+ data.append("{:>19}".format(query.value(9)))
+ for i in xrange(10, count):
+ data.append(query.value(i))
+ return data
# Base class for custom ResizeColumnsToContents
@@ -1934,6 +2272,240 @@ class ResizeColumnsToContentsBase(QObject):
self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
+# Convert value to CSV
+
+def ToCSValue(val):
+ if '"' in val:
+ val = val.replace('"', '""')
+ if "," in val or '"' in val:
+ val = '"' + val + '"'
+ return val
+
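
ToCSValue() applies the usual CSV quoting rules: embedded double quotes are doubled, and any field containing a comma or a quote is wrapped in quotes. For example (illustrative only, assuming the function above is in scope):

print(ToCSValue('plain'))      # plain
print(ToCSValue('a,b'))        # "a,b"
print(ToCSValue('say "hi"'))   # "say ""hi"""
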
+# Key to sort table model indexes by row / column, assuming fewer than 1000 columns
+
+glb_max_cols = 1000
+
+def RowColumnKey(a):
+ return a.row() * glb_max_cols + a.column()
+
+# Copy selected table cells to clipboard
+
+def CopyTableCellsToClipboard(view, as_csv=False, with_hdr=False):
+ indexes = sorted(view.selectedIndexes(), key=RowColumnKey)
+ idx_cnt = len(indexes)
+ if not idx_cnt:
+ return
+ if idx_cnt == 1:
+ with_hdr=False
+ min_row = indexes[0].row()
+ max_row = indexes[0].row()
+ min_col = indexes[0].column()
+ max_col = indexes[0].column()
+ for i in indexes:
+ min_row = min(min_row, i.row())
+ max_row = max(max_row, i.row())
+ min_col = min(min_col, i.column())
+ max_col = max(max_col, i.column())
+ if max_col > glb_max_cols:
+ raise RuntimeError("glb_max_cols is too low")
+ max_width = [0] * (1 + max_col - min_col)
+ for i in indexes:
+ c = i.column() - min_col
+ max_width[c] = max(max_width[c], len(str(i.data())))
+ text = ""
+ pad = ""
+ sep = ""
+ if with_hdr:
+ model = indexes[0].model()
+ for col in range(min_col, max_col + 1):
+ val = model.headerData(col, Qt.Horizontal)
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ c = col - min_col
+ max_width[c] = max(max_width[c], len(val))
+ width = max_width[c]
+ align = model.headerData(col, Qt.Horizontal, Qt.TextAlignmentRole)
+ if align & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ text += "\n"
+ pad = ""
+ sep = ""
+ last_row = min_row
+ for i in indexes:
+ if i.row() > last_row:
+ last_row = i.row()
+ text += "\n"
+ pad = ""
+ sep = ""
+ if as_csv:
+ text += sep + ToCSValue(str(i.data()))
+ sep = ","
+ else:
+ width = max_width[i.column() - min_col]
+ if i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+ val = str(i.data()).rjust(width)
+ else:
+ val = str(i.data())
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ QApplication.clipboard().setText(text)
+
+def CopyTreeCellsToClipboard(view, as_csv=False, with_hdr=False):
+ indexes = view.selectedIndexes()
+ if not len(indexes):
+ return
+
+ selection = view.selectionModel()
+
+ first = None
+ for i in indexes:
+ above = view.indexAbove(i)
+ if not selection.isSelected(above):
+ first = i
+ break
+
+ if first is None:
+ raise RuntimeError("CopyTreeCellsToClipboard internal error")
+
+ model = first.model()
+ row_cnt = 0
+ col_cnt = model.columnCount(first)
+ max_width = [0] * col_cnt
+
+ indent_sz = 2
+ indent_str = " " * indent_sz
+
+ expanded_mark_sz = 2
+ if sys.version_info[0] == 3:
+ expanded_mark = "\u25BC "
+ not_expanded_mark = "\u25B6 "
+ else:
+ expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xBC) + " ", "utf-8")
+ not_expanded_mark = unicode(chr(0xE2) + chr(0x96) + chr(0xB6) + " ", "utf-8")
+ leaf_mark = " "
+
+ if not as_csv:
+ pos = first
+ while True:
+ row_cnt += 1
+ row = pos.row()
+ for c in range(col_cnt):
+ i = pos.sibling(row, c)
+ if c:
+ n = len(str(i.data()))
+ else:
+ n = len(str(i.data()).strip())
+ n += (i.internalPointer().level - 1) * indent_sz
+ n += expanded_mark_sz
+ max_width[c] = max(max_width[c], n)
+ pos = view.indexBelow(pos)
+ if not selection.isSelected(pos):
+ break
+
+ text = ""
+ pad = ""
+ sep = ""
+ if with_hdr:
+ for c in range(col_cnt):
+ val = model.headerData(c, Qt.Horizontal, Qt.DisplayRole).strip()
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ max_width[c] = max(max_width[c], len(val))
+ width = max_width[c]
+ align = model.headerData(c, Qt.Horizontal, Qt.TextAlignmentRole)
+ if align & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ text += "\n"
+ pad = ""
+ sep = ""
+
+ pos = first
+ while True:
+ row = pos.row()
+ for c in range(col_cnt):
+ i = pos.sibling(row, c)
+ val = str(i.data())
+ if not c:
+ if model.hasChildren(i):
+ if view.isExpanded(i):
+ mark = expanded_mark
+ else:
+ mark = not_expanded_mark
+ else:
+ mark = leaf_mark
+ val = indent_str * (i.internalPointer().level - 1) + mark + val.strip()
+ if as_csv:
+ text += sep + ToCSValue(val)
+ sep = ","
+ else:
+ width = max_width[c]
+ if c and i.data(Qt.TextAlignmentRole) & Qt.AlignRight:
+ val = val.rjust(width)
+ text += pad + sep + val
+ pad = " " * (width - len(val))
+ sep = " "
+ pos = view.indexBelow(pos)
+ if not selection.isSelected(pos):
+ break
+ text = text.rstrip() + "\n"
+ pad = ""
+ sep = ""
+
+ QApplication.clipboard().setText(text)
+
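
The expanded/not-expanded marks set up at the top of CopyTreeCellsToClipboard() are the same characters in both branches: "\u25BC" and "\u25B6" are simply the Python 3 spellings of the UTF-8 byte sequences the Python 2 branch decodes. A quick check (illustrative only):

print(u"\u25BC " == b"\xe2\x96\xbc ".decode("utf-8"))   # True - black down-pointing triangle
print(u"\u25B6 " == b"\xe2\x96\xb6 ".decode("utf-8"))   # True - black right-pointing triangle
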
+def CopyCellsToClipboard(view, as_csv=False, with_hdr=False):
+ view.CopyCellsToClipboard(view, as_csv, with_hdr)
+
+def CopyCellsToClipboardHdr(view):
+ CopyCellsToClipboard(view, False, True)
+
+def CopyCellsToClipboardCSV(view):
+ CopyCellsToClipboard(view, True, True)
+
+# Context menu
+
+class ContextMenu(object):
+
+ def __init__(self, view):
+ self.view = view
+ self.view.setContextMenuPolicy(Qt.CustomContextMenu)
+ self.view.customContextMenuRequested.connect(self.ShowContextMenu)
+
+ def ShowContextMenu(self, pos):
+ menu = QMenu(self.view)
+ self.AddActions(menu)
+ menu.exec_(self.view.mapToGlobal(pos))
+
+ def AddCopy(self, menu):
+ menu.addAction(CreateAction("&Copy selection", "Copy to clipboard", lambda: CopyCellsToClipboardHdr(self.view), self.view))
+ menu.addAction(CreateAction("Copy selection as CS&V", "Copy to clipboard as CSV", lambda: CopyCellsToClipboardCSV(self.view), self.view))
+
+ def AddActions(self, menu):
+ self.AddCopy(menu)
+
+class TreeContextMenu(ContextMenu):
+
+ def __init__(self, view):
+ super(TreeContextMenu, self).__init__(view)
+
+ def AddActions(self, menu):
+ i = self.view.currentIndex()
+ text = str(i.data()).strip()
+ if len(text):
+ menu.addAction(CreateAction('Copy "' + text + '"', "Copy to clipboard", lambda: QApplication.clipboard().setText(text), self.view))
+ self.AddCopy(menu)
+
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
@@ -1952,9 +2524,13 @@ class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
self.ResizeColumnsToContents()
+ self.context_menu = ContextMenu(self.view)
+
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.data_model)
@@ -1998,6 +2574,107 @@ def GetTableList(glb):
tables.append("information_schema.columns")
return tables
+# Top Calls data model
+
+class TopCallsModel(SQLTableModel):
+
+ def __init__(self, glb, report_vars, parent=None):
+ text = ""
+ if not glb.dbref.is_sqlite3:
+ text = "::text"
+ limit = ""
+ if len(report_vars.limit):
+ limit = " LIMIT " + report_vars.limit
+ sql = ("SELECT comm, pid, tid, name,"
+ " CASE"
+ " WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
+ " ELSE short_name"
+ " END AS dso,"
+ " call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
+ " CASE"
+ " WHEN (calls.flags = 1) THEN 'no call'" + text +
+ " WHEN (calls.flags = 2) THEN 'no return'" + text +
+ " WHEN (calls.flags = 3) THEN 'no call/return'" + text +
+ " ELSE ''" + text +
+ " END AS flags"
+ " FROM calls"
+ " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+ " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+ " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+ " INNER JOIN comms ON calls.comm_id = comms.id"
+ " INNER JOIN threads ON calls.thread_id = threads.id" +
+ report_vars.where_clause +
+ " ORDER BY elapsed_time DESC" +
+ limit
+ )
+ column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
+ self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
+ super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
+
+ def columnAlignment(self, column):
+ return self.alignment[column]
+
+# Top Calls report creation dialog
+
+class TopCallsDialog(ReportDialogBase):
+
+ def __init__(self, glb, parent=None):
+ title = "Top Calls by Elapsed Time"
+ items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
+ lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
+ lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
+ lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
+ lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
+ super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
+
+# Top Calls window
+
+class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+ def __init__(self, glb, report_vars, parent=None):
+ super(TopCallsWindow, self).__init__(parent)
+
+ self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
+ self.model = self.data_model
+
+ self.view = QTableView()
+ self.view.setModel(self.model)
+ self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+ self.view.verticalHeader().setVisible(False)
+ self.view.setSelectionMode(QAbstractItemView.ContiguousSelection)
+ self.view.CopyCellsToClipboard = CopyTableCellsToClipboard
+
+ self.context_menu = ContextMenu(self.view)
+
+ self.ResizeColumnsToContents()
+
+ self.find_bar = FindBar(self, self, True)
+
+ self.finder = ChildDataItemFinder(self.model)
+
+ self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+ self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+ self.setWidget(self.vbox.Widget())
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
+
+ def Find(self, value, direction, pattern, context):
+ self.view.setFocus()
+ self.find_bar.Busy()
+ self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+ def FindDone(self, row):
+ self.find_bar.Idle()
+ if row >= 0:
+ self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ else:
+ self.find_bar.NotFound()
+
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
@@ -2099,8 +2776,10 @@ p.c2 {
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
-<p class=c2><a href=#allbranches>1.2 All branches</a></p>
-<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
+<p class=c2><a href=#allbranches>1.3 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
+<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
@@ -2136,7 +2815,10 @@ v- ls
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
-<h2 id=allbranches>1.2 All branches</h2>
+<h2 id=calltree>1.2 Call Tree</h2>
+The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
+Also, the 'Count' column, which would always be 1, is replaced by the 'Call Time' column.
+<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
@@ -2162,10 +2844,10 @@ sudo ldconfig
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
-<h2 id=selectedbranches>1.3 Selected branches</h2>
+<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
-<h3>1.3.1 Time ranges</h3>
+<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
@@ -2176,6 +2858,10 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples:
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
+The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
+The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
@@ -2227,6 +2913,60 @@ class HelpOnlyWindow(QMainWindow):
self.setCentralWidget(self.text)
+# PostgreSQL server version
+
+def PostgreSQLServerVersion(db):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT VERSION()")
+ if query.next():
+ v_str = query.value(0)
+ v_list = v_str.strip().split(" ")
+ if v_list[0] == "PostgreSQL" and v_list[2] == "on":
+ return v_list[1]
+ return v_str
+ return "Unknown"
+
+# SQLite version
+
+def SQLiteVersion(db):
+ query = QSqlQuery(db)
+ QueryExec(query, "SELECT sqlite_version()")
+ if query.next():
+ return query.value(0)
+ return "Unknown"
+
+# About dialog
+
+class AboutDialog(QDialog):
+
+ def __init__(self, glb, parent=None):
+ super(AboutDialog, self).__init__(parent)
+
+ self.setWindowTitle("About Exported SQL Viewer")
+ self.setMinimumWidth(300)
+
+ pyside_version = "1" if pyside_version_1 else "2"
+
+ text = "<pre>"
+ text += "Python version: " + sys.version.split(" ")[0] + "\n"
+ text += "PySide version: " + pyside_version + "\n"
+ text += "Qt version: " + qVersion() + "\n"
+ if glb.dbref.is_sqlite3:
+ text += "SQLite version: " + SQLiteVersion(glb.db) + "\n"
+ else:
+			text += "PostgreSQL version: " + PostgreSQLServerVersion(glb.db) + "\n"
+ text += "</pre>"
+
+ self.text = QTextBrowser()
+ self.text.setHtml(text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addWidget(self.text)
+
+ self.setLayout(self.vbox);
+
# Font resize
def ResizeFont(widget, diff):
@@ -2299,22 +3039,46 @@ class MainWindow(QMainWindow):
file_menu.addAction(CreateExitAction(glb.app, self))
edit_menu = menu.addMenu("&Edit")
+ edit_menu.addAction(CreateAction("&Copy", "Copy to clipboard", self.CopyToClipboard, self, QKeySequence.Copy))
+ edit_menu.addAction(CreateAction("Copy as CS&V", "Copy to clipboard as CSV", self.CopyToClipboardCSV, self))
edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
- reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+
+ if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
+ reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
+ if IsSelectable(glb.db, "calls"):
+ reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
+
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
help_menu = menu.addMenu("&Help")
help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+ help_menu.addAction(CreateAction("&About Exported SQL Viewer", "About this application", self.About, self))
+
+ def Try(self, fn):
+ win = self.mdi_area.activeSubWindow()
+ if win:
+ try:
+ fn(win.view)
+ except:
+ pass
+
+ def CopyToClipboard(self):
+ self.Try(CopyCellsToClipboardHdr)
+
+ def CopyToClipboardCSV(self):
+ self.Try(CopyCellsToClipboardCSV)
def Find(self):
win = self.mdi_area.activeSubWindow()
@@ -2333,12 +3097,10 @@ class MainWindow(QMainWindow):
pass
def ShrinkFont(self):
- win = self.mdi_area.activeSubWindow()
- ShrinkFont(win.view)
+ self.Try(ShrinkFont)
def EnlargeFont(self):
- win = self.mdi_area.activeSubWindow()
- EnlargeFont(win.view)
+ self.Try(EnlargeFont)
def EventMenu(self, events, reports_menu):
branches_events = 0
@@ -2364,14 +3126,23 @@ class MainWindow(QMainWindow):
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
+ def NewCallTree(self):
+ CallTreeWindow(self.glb, self)
+
+ def NewTopCalls(self):
+ dialog = TopCallsDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ TopCallsWindow(self.glb, dialog.report_vars, self)
+
def NewBranchView(self, event_id):
- BranchWindow(self.glb, event_id, "", "", self)
+ BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
- BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+ BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
@@ -2379,6 +3150,10 @@ class MainWindow(QMainWindow):
def Help(self):
HelpWindow(self.glb, self)
+ def About(self):
+ dialog = AboutDialog(self.glb, self)
+ dialog.exec_()
+
# XED Disassembler
class xed_state_t(Structure):
@@ -2459,9 +3234,13 @@ class LibXED():
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
- return inst.xedd[166], inst.buffer.value
+ return inst.xedd[166], result
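
The decode is needed because ctypes character buffers yield bytes on Python 3 but str on Python 2, and the disassembled text is later used alongside ordinary strings. A standalone sketch (illustrative only; the instruction text is made up):

import sys
from ctypes import create_string_buffer

buf = create_string_buffer(b"jz 0x140001000")
if sys.version_info[0] == 2:
	text = buf.value            # already a str on Python 2
else:
	text = buf.value.decode()   # bytes -> str on Python 3
print(text)
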
def TryOpen(file_name):
try:
@@ -2477,9 +3256,14 @@ def Is64Bit(f):
header = f.read(7)
f.seek(pos)
magic = header[0:4]
- eclass = ord(header[4])
- encoding = ord(header[5])
- version = ord(header[6])
+ if sys.version_info[0] == 2:
+ eclass = ord(header[4])
+ encoding = ord(header[5])
+ version = ord(header[6])
+ else:
+ eclass = header[4]
+ encoding = header[5]
+ version = header[6]
if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
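
The eclass/encoding/version handling changes because indexing a bytes object returns an int on Python 3, whereas Python 2 returns a one-character string that still needs ord(). Illustrative only:

import sys

header = b"\x7fELF\x02\x01\x01"
if sys.version_info[0] == 2:
	eclass = ord(header[4])   # header[4] is a 1-character str on Python 2
else:
	eclass = header[4]        # header[4] is already an int on Python 3
print(eclass)                 # 2 - ELFCLASS64
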
@@ -2578,7 +3362,7 @@ class DBRef():
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+ printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
raise Exception("Too few arguments")
dbname = sys.argv[1]
@@ -2591,8 +3375,8 @@ def Main():
is_sqlite3 = False
try:
- f = open(dbname)
- if f.read(15) == "SQLite format 3":
+ f = open(dbname, "rb")
+ if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index cafeff3d74db..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -32,7 +34,7 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_error_totals()
@@ -56,23 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu,
raw_syscalls__sys_exit(**locals())
def print_error_totals():
- if for_comm is not None:
- print "\nsyscall errors for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall errors:\n\n",
-
- print "%-30s %10s\n" % ("comm [pid]", "count"),
- print "%-30s %10s\n" % ("------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id in id_keys:
- print " syscall: %-16s\n" % syscall_name(id),
- ret_keys = syscalls[comm][pid][id].keys()
- for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
- print " err = %-20s %10d\n" % (strerror(ret), val),
+ if for_comm is not None:
+ print("\nsyscall errors for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall errors:\n")
+
+ print("%-30s %10s" % ("comm [pid]", "count"))
+ print("%-30s %10s" % ("------------------------------", "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id in id_keys:
+ print(" syscall: %-16s" % syscall_name(id))
+ ret_keys = syscalls[comm][pid][id].keys()
+ for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" err = %-20s %10d" % (strerror(ret), val))
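
The last hunk also replaces the Python 2 tuple-unpacking lambda (lambda(k, v): ...), which is a syntax error on Python 3, with an index-based key function. A standalone sketch of the same idiom (illustrative only; the error names are made up):

counts = {"EPERM": 3, "ENOENT": 7, "EINTR": 1}
# Sort items by value, then key, in descending order - works on Python 2 and 3.
for err, val in sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True):
	print(" err = %-20s %10d" % (err, val))
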
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
@@ -10,6 +10,8 @@
#
# Measures futex contention
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
@@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
nr, ret):
- if thread_blocktime.has_key(tid):
+ if tid in thread_blocktime:
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
- print "%s[%d] lock %x contended %d times, %d avg ns" % \
- (process_names[tid], tid, lock, count, avg)
+ print("%s[%d] lock %x contended %d times, %d avg ns" %
+ (process_names[tid], tid, lock, count, avg))
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index b19172d673af..a73847c8f548 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -10,6 +10,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
+from __future__ import print_function
+
import os
import sys
import struct
@@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
#from Core import *
def trace_begin():
- print "Intel PT Power Events and PTWRITE"
+ print("Intel PT Power Events and PTWRITE")
def trace_end():
- print "End"
+ print("End")
def trace_unhandled(event_name, context, event_fields_dict):
- print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print "IP: %u payload: %#x" % (exact_ip, payload),
+ print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
- print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
+ print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ')
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
- print "hints: %#x extensions: %#x" % (hints, extensions),
+ print("hints: %#x extensions: %#x" % (hints, extensions), end=' ')
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -57,13 +59,14 @@ def print_pwre(raw_buf):
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
- print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+ end=' ')
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
- print "IP: %u" % (exact_ip),
+ print("IP: %u" % (exact_ip), end=' ')
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
@@ -71,36 +74,39 @@ def print_pwrx(raw_buf):
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
- print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+ print("deepest cstate: %u last cstate: %u wake reason: %#x" %
+ (deepest_cstate, last_cstate, wake_reason), end=' ')
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
+ (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+ end=' ')
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
- print "%16x %s (%s)" % (ip, symbol, dso)
+ print("%16x %s (%s)" % (ip, symbol, dso))
def process_event(param_dict):
- event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
- comm = param_dict["comm"]
- name = param_dict["ev_name"]
-
- # Symbol and dso info are not always resolved
- if (param_dict.has_key("dso")):
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if (param_dict.has_key("symbol")):
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ event_attr = param_dict["attr"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
+ comm = param_dict["comm"]
+ name = param_dict["ev_name"]
+
+ # Symbol and dso info are not always resolved
+ if "dso" in param_dict:
+ dso = param_dict["dso"]
+ else:
+ dso = "[unknown]"
+
+ if "symbol" in param_dict:
+ symbol = param_dict["symbol"]
+ else:
+ symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
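
Throughout this script the Python 2 trailing-comma form of print, which suppresses the newline, becomes an explicit end=' ' keyword argument once print_function is imported. Illustrative only:

from __future__ import print_function

exact_ip, payload = 1, 0x1234
print("IP: %u" % exact_ip, end=' ')   # stays on the same line
print("payload: %#x" % payload)       # newline printed here
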
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
index ebee2c5ae496..1f332e72b9b0 100644
--- a/tools/perf/scripts/python/mem-phys-addr.py
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -4,6 +4,8 @@
# Copyright (c) 2018, Intel Corporation.
from __future__ import division
+from __future__ import print_function
+
import os
import sys
import struct
@@ -31,21 +33,24 @@ def parse_iomem():
for i, j in enumerate(f):
m = re.split('-|:',j,2)
if m[2].strip() == 'System RAM':
- system_ram.append(long(m[0], 16))
- system_ram.append(long(m[1], 16))
+ system_ram.append(int(m[0], 16))
+ system_ram.append(int(m[1], 16))
if m[2].strip() == 'Persistent Memory':
- pmem.append(long(m[0], 16))
- pmem.append(long(m[1], 16))
+ pmem.append(int(m[0], 16))
+ pmem.append(int(m[1], 16))
def print_memory_type():
- print "Event: %s" % (event_name)
- print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
- print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ print("Event: %s" % (event_name))
+ print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='')
+ print("%-40s %10s %10s\n" % ("----------------------------------------",
"-----------", "-----------"),
+ end='');
total = sum(load_mem_type_cnt.values())
for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
- key = lambda(k, v): (v, k), reverse = True):
- print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d %10.1f%%\n" %
+ (mem_type, count, 100 * count / total),
+ end='')
def trace_begin():
parse_iomem()
@@ -80,7 +85,7 @@ def find_memory_type(phys_addr):
f.seek(0, 0)
for j in f:
m = re.split('-|:',j,2)
- if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ if int(m[0], 16) <= phys_addr <= int(m[1], 16):
return m[2]
return "N/A"
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a150164b44a3..101059971738 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -1,11 +1,13 @@
 # Monitor the system for dropped packets and produce a report of drop locations and counts
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -50,19 +52,19 @@ def get_sym(sloc):
return (None, 0)
def print_drop_table():
- print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
+ print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
- print "%25s %25s %25s" % (sym, off, drop_log[i])
+ print("%25s %25s %25s" % (sym, off, drop_log[i]))
def trace_begin():
- print "Starting trace (Ctrl-C to dump results)"
+ print("Starting trace (Ctrl-C to dump results)")
def trace_end():
- print "Gathering kallsyms data"
+ print("Gathering kallsyms data")
get_kallsyms_table()
print_drop_table()
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 9b2050f778f1..ea0c8b90a783 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -8,6 +8,8 @@
 # dev=: show only things related to the specified device
 # debug: run in debug mode. It shows buffer status.
+from __future__ import print_function
+
import os
import sys
@@ -17,6 +19,7 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \
from perf_trace_context import *
from Core import *
from Util import *
+from functools import cmp_to_key
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
@@ -61,12 +64,12 @@ def diff_msec(src, dst):
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
- print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
+ print("%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" %
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
- diff_msec(hunk['xmit_t'], hunk['free_t']))
+ diff_msec(hunk['xmit_t'], hunk['free_t'])))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
@@ -98,55 +101,57 @@ def print_receive(hunk):
if show_hunk == 0:
return
- print "%d.%06dsec cpu=%d" % \
- (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
+ print("%d.%06dsec cpu=%d" %
+ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu))
for i in range(len(irq_list)):
- print PF_IRQ_ENTRY % \
+ print(PF_IRQ_ENTRY %
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
- irq_list[i]['irq'], irq_list[i]['name'])
- print PF_JOINT
+ irq_list[i]['irq'], irq_list[i]['name']))
+ print(PF_JOINT)
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
- print PF_NET_RX % \
+ print(PF_NET_RX %
(diff_msec(base_t, irq_event['time']),
- irq_event['skbaddr'])
- print PF_JOINT
- print PF_SOFT_ENTRY % \
- diff_msec(base_t, hunk['sirq_ent_t'])
- print PF_JOINT
+ irq_event['skbaddr']))
+ print(PF_JOINT)
+ print(PF_SOFT_ENTRY %
+ diff_msec(base_t, hunk['sirq_ent_t']))
+ print(PF_JOINT)
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
- print PF_NAPI_POLL % \
- (diff_msec(base_t, event['event_t']), event['dev'])
+ print(PF_NAPI_POLL %
+ (diff_msec(base_t, event['event_t']),
+ event['dev']))
if i == len(event_list) - 1:
- print ""
+ print("")
else:
- print PF_JOINT
+ print(PF_JOINT)
else:
- print PF_NET_RECV % \
- (diff_msec(base_t, event['event_t']), event['skbaddr'],
- event['len'])
+ print(PF_NET_RECV %
+ (diff_msec(base_t, event['event_t']),
+ event['skbaddr'],
+ event['len']))
if 'comm' in event.keys():
- print PF_WJOINT
- print PF_CPY_DGRAM % \
+ print(PF_WJOINT)
+ print(PF_CPY_DGRAM %
(diff_msec(base_t, event['comm_t']),
- event['pid'], event['comm'])
+ event['pid'], event['comm']))
elif 'handle' in event.keys():
- print PF_WJOINT
+ print(PF_WJOINT)
if event['handle'] == "kfree_skb":
- print PF_KFREE_SKB % \
+ print(PF_KFREE_SKB %
(diff_msec(base_t,
event['comm_t']),
- event['location'])
+ event['location']))
elif event['handle'] == "consume_skb":
- print PF_CONS_SKB % \
+ print(PF_CONS_SKB %
diff_msec(base_t,
- event['comm_t'])
- print PF_JOINT
+ event['comm_t']))
+ print(PF_JOINT)
def trace_begin():
global show_tx
@@ -172,8 +177,7 @@ def trace_begin():
def trace_end():
# order all events in time
- all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
- b[EINFO_IDX_TIME]))
+	all_event_list.sort(key=cmp_to_key(lambda a, b: (a[EINFO_IDX_TIME] > b[EINFO_IDX_TIME]) - (a[EINFO_IDX_TIME] < b[EINFO_IDX_TIME])))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
@@ -210,19 +214,19 @@ def trace_end():
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
- print " dev len Qdisc " \
- " netdevice free"
+ print(" dev len Qdisc "
+ " netdevice free")
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
- print "debug buffer status"
- print "----------------------------"
- print "xmit Qdisc:remain:%d overflow:%d" % \
- (len(tx_queue_list), of_count_tx_queue_list)
- print "xmit netdevice:remain:%d overflow:%d" % \
- (len(tx_xmit_list), of_count_tx_xmit_list)
- print "receive:remain:%d overflow:%d" % \
- (len(rx_skb_list), of_count_rx_skb_list)
+ print("debug buffer status")
+ print("----------------------------")
+ print("xmit Qdisc:remain:%d overflow:%d" %
+ (len(tx_queue_list), of_count_tx_queue_list))
+ print("xmit netdevice:remain:%d overflow:%d" %
+ (len(tx_xmit_list), of_count_tx_xmit_list))
+ print("receive:remain:%d overflow:%d" %
+ (len(rx_skb_list), of_count_rx_skb_list))
 # called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
@@ -254,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi,
- dev_name, work=None, budget=None):
+ dev_name, work=None, budget=None):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name, work, budget)
all_event_list.append(event_info)
@@ -351,7 +355,7 @@ def handle_irq_softirq_exit(event_info):
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
- 'irq_list':irq_list, 'event_list':event_list}
+ 'irq_list':irq_list, 'event_list':event_list}
 	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
@@ -388,7 +392,7 @@ def handle_netif_receive_skb(event_info):
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
- 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
+ 'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
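The netdev-times.py hunks above apply the usual Python 2 to Python 3 conversion: print statements become print() calls (typically paired with a from __future__ import print_function so the same code still runs on Python 2), and the cmp-style sort is replaced by a key function. A minimal standalone sketch of both idioms, using fabricated (time, name) tuples rather than the script's real EINFO_*-indexed records:

from __future__ import print_function   # no-op on Python 3, enables print() on Python 2

# toy events: (time_ns, name); the real script keeps larger tuples indexed by EINFO_* constants
all_events = [(300, "irq"), (100, "softirq"), (200, "napi")]

# Python 3 idiom: sort by a key function instead of passing a cmp callable
all_events.sort(key=lambda e: e[0])

for t, name in all_events:
    print("%8d ns  %s" % (t, name))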
diff --git a/tools/perf/scripts/python/powerpc-hcalls.py b/tools/perf/scripts/python/powerpc-hcalls.py
index 00e0e7476e55..8b78dc790adb 100644
--- a/tools/perf/scripts/python/powerpc-hcalls.py
+++ b/tools/perf/scripts/python/powerpc-hcalls.py
@@ -4,6 +4,8 @@
#
# Hypervisor call statistics
+from __future__ import print_function
+
import os
import sys
@@ -149,7 +151,7 @@ hcall_table = {
}
def hcall_table_lookup(opcode):
- if (hcall_table.has_key(opcode)):
+ if (opcode in hcall_table):
return hcall_table[opcode]
else:
return opcode
@@ -157,8 +159,8 @@ def hcall_table_lookup(opcode):
print_ptrn = '%-28s%10s%10s%10s%10s'
def trace_end():
- print print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)')
- print '-' * 68
+ print(print_ptrn % ('hcall', 'count', 'min(ns)', 'max(ns)', 'avg(ns)'))
+ print('-' * 68)
for opcode in output:
h_name = hcall_table_lookup(opcode)
time = output[opcode]['time']
@@ -166,14 +168,14 @@ def trace_end():
min_t = output[opcode]['min']
max_t = output[opcode]['max']
- print print_ptrn % (h_name, cnt, min_t, max_t, time/cnt)
+ print(print_ptrn % (h_name, cnt, min_t, max_t, time//cnt))
def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
opcode, retval):
- if (d_enter.has_key(cpu) and d_enter[cpu].has_key(opcode)):
+ if (cpu in d_enter and opcode in d_enter[cpu]):
diff = nsecs(sec, nsec) - d_enter[cpu][opcode]
- if (output.has_key(opcode)):
+ if (opcode in output):
output[opcode]['time'] += diff
output[opcode]['cnt'] += 1
if (output[opcode]['min'] > diff):
@@ -190,11 +192,11 @@ def powerpc__hcall_exit(name, context, cpu, sec, nsec, pid, comm, callchain,
del d_enter[cpu][opcode]
# else:
-# print "Can't find matching hcall_enter event. Ignoring sample"
+# print("Can't find matching hcall_enter event. Ignoring sample")
def powerpc__hcall_entry(event_name, context, cpu, sec, nsec, pid, comm,
callchain, opcode):
- if (d_enter.has_key(cpu)):
+ if (cpu in d_enter):
d_enter[cpu][opcode] = nsecs(sec, nsec)
else:
d_enter[cpu] = {opcode: nsecs(sec, nsec)}
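The powerpc-hcalls.py changes swap the removed dict.has_key() method for the in operator and use floor division so the per-call average stays an integer under Python 3's true division. A small self-contained sketch of the same bookkeeping, with fabricated opcode/duration values in place of real trace data:

output = {}

def account(opcode, diff_ns):
    # membership test replaces output.has_key(opcode), which Python 3 removed
    if opcode in output:
        output[opcode]['time'] += diff_ns
        output[opcode]['cnt'] += 1
    else:
        output[opcode] = {'time': diff_ns, 'cnt': 1}

account(0x124, 970)
account(0x124, 1030)
# '//' keeps the integer semantics that '/' had on Python 2
avg = output[0x124]['time'] // output[0x124]['cnt']
print("avg %d ns over %d calls" % (avg, output[0x124]['cnt']))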
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index 3473e7f66081..8196e3087c9e 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
@@ -16,10 +14,10 @@ import sys
from collections import defaultdict
try:
- from UserList import UserList
+ from UserList import UserList
except ImportError:
- # Python 3: UserList moved to the collections package
- from collections import UserList
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py
index 61621b93affb..6e0278dcb092 100644
--- a/tools/perf/scripts/python/sctop.py
+++ b/tools/perf/scripts/python/sctop.py
@@ -8,7 +8,14 @@
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
-import os, sys, thread, time
+from __future__ import print_function
+
+import os, sys, time
+
+try:
+ import thread
+except ImportError:
+ import _thread as thread
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -62,18 +69,20 @@ def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
+ print("\nsyscall events for %s:\n" % (for_comm))
else:
- print "\nsyscall events:\n\n",
+ print("\nsyscall events:\n")
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" %
+ ("----------------------------------------",
+ "----------"))
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]),
+ reverse = True):
try:
- print "%-40s %10d\n" % (syscall_name(id), val),
+ print("%-40s %10d" % (syscall_name(id), val))
except TypeError:
pass
syscalls.clear()
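Two portability points carry the sctop.py hunks above: the thread module was renamed to _thread in Python 3, handled by falling back through ImportError, and dict.iteritems() plus tuple-unpacking lambdas like lambda (k, v): ... are gone, so the per-count ordering goes through items() with an explicit key function. A minimal sketch over a hand-built counts dict:

try:
    import thread              # Python 2 name
except ImportError:
    import _thread as thread   # Python 3 renamed the module

counts = {"read": 40, "write": 12, "openat": 40}
# kv is a (name, count) pair; sort by count, then name, highest first
for name, count in sorted(counts.items(), key=lambda kv: (kv[1], kv[0]), reverse=True):
    print("%-40s %10d" % (name, count))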
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 1697b5e18c96..b1c4def1410a 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -19,13 +19,15 @@
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.
+from __future__ import print_function
+
import os
import sys
from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
- '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
@@ -120,7 +122,6 @@ def process_event(param_dict):
lines[stack_string] = lines[stack_string] + 1
def trace_end():
- list = lines.keys()
- list.sort()
+ list = sorted(lines)
for stack in list:
- print "%s %d" % (stack, lines[stack])
+ print("%s %d" % (stack, lines[stack]))
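In Python 3, dict.keys() returns a view with no sort() method, which is why trace_end() in stackcollapse.py now builds the ordered stack list with sorted(); iterating sorted(d) yields the keys in order. A short sketch with fabricated folded stacks:

lines = {"main;parse;read": 7, "main;flush": 3}
for stack in sorted(lines):    # sorted() on a dict yields its keys in sorted order
    print("%s %d" % (stack, lines[stack]))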
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
index 8410672efb8b..01fa933ff3cf 100644
--- a/tools/perf/scripts/python/stat-cpi.py
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -1,6 +1,7 @@
-#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
+from __future__ import print_function
+
data = {}
times = []
threads = []
@@ -20,8 +21,8 @@ def store_key(time, cpu, thread):
threads.append(thread)
def store(time, event, cpu, thread, val, ena, run):
- #print "event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" % \
- # (event, cpu, thread, time, val, ena, run)
+ #print("event %s cpu %d, thread %d, time %d, val %d, ena %d, run %d" %
+ # (event, cpu, thread, time, val, ena, run))
store_key(time, cpu, thread)
key = get_key(time, event, cpu, thread)
@@ -59,7 +60,7 @@ def stat__interval(time):
if ins != 0:
cpi = cyc/float(ins)
- print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
+ print("%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins))
def trace_end():
pass
@@ -75,4 +76,4 @@ def trace_end():
# if ins != 0:
# cpi = cyc/float(ins)
#
-# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
+# print("time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi))
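stat-cpi.py keeps the explicit float() in the CPI ratio so the division stays fractional on Python 2 as well; only the surrounding print formatting changes. For reference, a toy CPI calculation in the same style, with fabricated counter deltas:

cyc, ins = 1200000, 800000          # fabricated cycles/instructions for one interval
if ins != 0:
    cpi = cyc / float(ins)          # float() keeps the ratio fractional on Python 2 too
    print("cpi %f (%d/%d)" % (cpi, cyc, ins))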
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py
index daf314cc5dd3..f254e40c6f0f 100644
--- a/tools/perf/scripts/python/syscall-counts-by-pid.py
+++ b/tools/perf/scripts/python/syscall-counts-by-pid.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
@@ -31,17 +33,16 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
-
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
- (for_pid and common_pid != for_pid ):
+ (for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
@@ -49,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events by comm/pid:\n\n",
-
- print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "----------"),
-
- comm_keys = syscalls.keys()
- for comm in comm_keys:
- pid_keys = syscalls[comm].keys()
- for pid in pid_keys:
- print "\n%s [%d]\n" % (comm, pid),
- id_keys = syscalls[comm][pid].keys()
- for id, val in sorted(syscalls[comm][pid].iteritems(), \
- key = lambda(k, v): (v, k), reverse = True):
- print " %-38s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events by comm/pid:\n")
+
+ print("%-40s %10s" % ("comm [pid]/syscalls", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "----------"))
+
+ comm_keys = syscalls.keys()
+ for comm in comm_keys:
+ pid_keys = syscalls[comm].keys()
+ for pid in pid_keys:
+ print("\n%s [%d]" % (comm, pid))
+ id_keys = syscalls[comm][pid].keys()
+ for id, val in sorted(syscalls[comm][pid].items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print(" %-38s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py
index e66a7730aeb5..8adb95ff1664 100644
--- a/tools/perf/scripts/python/syscall-counts.py
+++ b/tools/perf/scripts/python/syscall-counts.py
@@ -5,6 +5,8 @@
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
+from __future__ import print_function
+
import os
import sys
@@ -28,14 +30,14 @@ if len(sys.argv) > 1:
syscalls = autodict()
def trace_begin():
- print "Press control+C to stop and show the summary"
+ print("Press control+C to stop and show the summary")
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- common_callchain, id, args):
+ common_secs, common_nsecs, common_pid, common_comm,
+ common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
@@ -45,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
- common_secs, common_nsecs, common_pid, common_comm,
- id, args):
+ common_secs, common_nsecs, common_pid, common_comm, id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
- if for_comm is not None:
- print "\nsyscall events for %s:\n\n" % (for_comm),
- else:
- print "\nsyscall events:\n\n",
-
- print "%-40s %10s\n" % ("event", "count"),
- print "%-40s %10s\n" % ("----------------------------------------", \
- "-----------"),
-
- for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
- reverse = True):
- print "%-40s %10d\n" % (syscall_name(id), val),
+ if for_comm is not None:
+ print("\nsyscall events for %s:\n" % (for_comm))
+ else:
+ print("\nsyscall events:\n")
+
+ print("%-40s %10s" % ("event", "count"))
+ print("%-40s %10s" % ("----------------------------------------",
+ "-----------"))
+
+ for id, val in sorted(syscalls.items(),
+ key = lambda kv: (kv[1], kv[0]), reverse = True):
+ print("%-40s %10d" % (syscall_name(id), val))
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index e952127e4fb0..cb39ac46bc73 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -1,4 +1,3 @@
-#! /usr/bin/python
# SPDX-License-Identifier: GPL-2.0
from __future__ import print_function
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index cb0a3138fa54..93818054ae20 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -1,6 +1,6 @@
[config]
command = record
-args = -C 0 kill >/dev/null 2>&1
+args = --no-bpf-event -C 0 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic
index 85a23cf35ba1..b0ca42a5ecc9 100644
--- a/tools/perf/tests/attr/test-record-basic
+++ b/tools/perf/tests/attr/test-record-basic
@@ -1,6 +1,6 @@
[config]
command = record
-args = kill >/dev/null 2>&1
+args = --no-bpf-event kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any
index 81f839e2fad0..1a99b3ce6b89 100644
--- a/tools/perf/tests/attr/test-record-branch-any
+++ b/tools/perf/tests/attr/test-record-branch-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -b kill >/dev/null 2>&1
+args = --no-bpf-event -b kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any
index 357421f4dfce..709768b508c6 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any
+++ b/tools/perf/tests/attr/test-record-branch-filter-any
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any kill >/dev/null 2>&1
+args = --no-bpf-event -j any kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call
index dbc55f2ab845..f943221f7825 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_call kill >/dev/null 2>&1
+args = --no-bpf-event -j any_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret
index a0824ff8e131..fd4f5b4154a9 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_ret
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j any_ret kill >/dev/null 2>&1
+args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv
index f34d6f120181..4e52d685ebe1 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-hv
+++ b/tools/perf/tests/attr/test-record-branch-filter-hv
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j hv kill >/dev/null 2>&1
+args = --no-bpf-event -j hv kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call
index b86a35232248..e08c6ab3796e 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-ind_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j ind_call kill >/dev/null 2>&1
+args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k
index d3fbc5e1858a..b4b98f84fc2f 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-k
+++ b/tools/perf/tests/attr/test-record-branch-filter-k
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j k kill >/dev/null 2>&1
+args = --no-bpf-event -j k kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u
index a318f0dda173..fb9610edbb0d 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-u
+++ b/tools/perf/tests/attr/test-record-branch-filter-u
@@ -1,6 +1,6 @@
[config]
command = record
-args = -j u kill >/dev/null 2>&1
+args = --no-bpf-event -j u kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count
index 34f6cc577263..5e9b9019d786 100644
--- a/tools/perf/tests/attr/test-record-count
+++ b/tools/perf/tests/attr/test-record-count
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 123 kill >/dev/null 2>&1
+args = --no-bpf-event -c 123 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data
index a9cf2233b0ce..a99bb13149c2 100644
--- a/tools/perf/tests/attr/test-record-data
+++ b/tools/perf/tests/attr/test-record-data
@@ -1,6 +1,6 @@
[config]
command = record
-args = -d kill >/dev/null 2>&1
+args = --no-bpf-event -d kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq
index bf4cb459f0d5..89e29f6b2ae0 100644
--- a/tools/perf/tests/attr/test-record-freq
+++ b/tools/perf/tests/attr/test-record-freq
@@ -1,6 +1,6 @@
[config]
command = record
-args = -F 100 kill >/dev/null 2>&1
+args = --no-bpf-event -F 100 kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 0b216e69760c..5d8234d50845 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -1,6 +1,6 @@
[config]
command = record
-args = -g kill >/dev/null 2>&1
+args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf
index da2fa73bd0a2..ae92061d611d 100644
--- a/tools/perf/tests/attr/test-record-graph-dwarf
+++ b/tools/perf/tests/attr/test-record-graph-dwarf
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph dwarf -- kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 625d190bb798..5630521c0b0f 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -1,6 +1,6 @@
[config]
command = record
-args = --call-graph fp kill >/dev/null 2>&1
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 618ba1c17474..14ee60fd3f41 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -1,6 +1,6 @@
[config]
command = record
-args = --group -e cycles,instructions kill >/dev/null 2>&1
+args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f0729c454f16..300b9f7e6d69 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 48e8bd12fe46..3ffe246e0228 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -1,6 +1,6 @@
[config]
command = record
-args = -e '{cycles,instructions}' kill >/dev/null 2>&1
+args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
ret = 1
[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-buffering b/tools/perf/tests/attr/test-record-no-buffering
index aa3956d8fe20..583dcbb078ba 100644
--- a/tools/perf/tests/attr/test-record-no-buffering
+++ b/tools/perf/tests/attr/test-record-no-buffering
@@ -1,6 +1,6 @@
[config]
command = record
-args = --no-buffering kill >/dev/null 2>&1
+args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
index 560943decb87..15d1dc162e1c 100644
--- a/tools/perf/tests/attr/test-record-no-inherit
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -1,6 +1,6 @@
[config]
command = record
-args = -i kill >/dev/null 2>&1
+args = --no-bpf-event -i kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples
index 8eb73ab639e0..596fbd6d5a2c 100644
--- a/tools/perf/tests/attr/test-record-no-samples
+++ b/tools/perf/tests/attr/test-record-no-samples
@@ -1,6 +1,6 @@
[config]
command = record
-args = -n kill >/dev/null 2>&1
+args = --no-bpf-event -n kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period
index 69bc748f0f27..119101154c5e 100644
--- a/tools/perf/tests/attr/test-record-period
+++ b/tools/perf/tests/attr/test-record-period
@@ -1,6 +1,6 @@
[config]
command = record
-args = -c 100 -P kill >/dev/null 2>&1
+args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw
index a188a614a44c..13a5f7860c78 100644
--- a/tools/perf/tests/attr/test-record-raw
+++ b/tools/perf/tests/attr/test-record-raw
@@ -1,6 +1,6 @@
[config]
command = record
-args = -R kill >/dev/null 2>&1
+args = --no-bpf-event -R kill >/dev/null 2>&1
ret = 1
[event:base-record]
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 6d598cc071ae..1a9c3becf5ff 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -18,7 +18,7 @@ static void testcase(void)
int i;
for (i = 0; i < NR_ITERS; i++) {
- char proc_name[10];
+ char proc_name[15];
snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
prctl(PR_SET_NAME, proc_name);
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index a20cbc445426..57fc544aedb0 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -15,7 +15,6 @@
#include <sys/mman.h>
#include <linux/compiler.h>
#include <linux/hw_breakpoint.h>
-#include <sys/ioctl.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index dbf2c69944d2..4ebd2681e760 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -15,6 +15,8 @@
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "event.h"
#include "thread.h"
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index 7f6c52021e41..946ab4b63acd 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -304,7 +304,7 @@ int test__dso_data_cache(struct test *test __maybe_unused, int subtest __maybe_u
/* Make sure we did not leak any file descriptor. */
nr_end = open_files_cnt();
pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
- TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+ TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
return 0;
}
@@ -380,6 +380,6 @@ int test__dso_data_reopen(struct test *test __maybe_unused, int subtest __maybe_
/* Make sure we did not leak any file descriptor. */
nr_end = open_files_cnt();
pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
- TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+ TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
return 0;
}
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 7c8d2e422401..077c306c1cae 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -10,6 +10,7 @@
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include "callchain.h"
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 5cbba70bcdd0..71f60c0f9faa 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
return -1;
}
- if (perf_evsel__test_field(evsel, "comm", 16, true))
+ if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
+ perf_evsel__delete(evsel);
return ret;
}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 01f0706995a9..9acc1e80b936 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
const char *p;
const char **other;
double val;
- int ret;
+ int i, ret;
struct parse_ctx ctx;
int num_other;
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
TEST_ASSERT_VAL("find other", other[3] == NULL);
+
+ for (i = 0; i < num_other; i++)
+ free((void *)other[i]);
free((void *)other);
return 0;
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index b889a28fd80b..469958cd7fe0 100644
--- a/tools/perf/tests/hists_common.c
+++ b/tools/perf/tests/hists_common.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -161,7 +162,7 @@ out:
void print_hists_in(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -170,7 +171,7 @@ void print_hists_in(struct hists *hists)
root = hists->entries_in;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -191,13 +192,13 @@ void print_hists_in(struct hists *hists)
void print_hists_out(struct hists *hists)
{
int i = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
root = &hists->entries;
pr_info("----- %s --------\n", __func__);
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 65fe02bebbee..7a2eed6c783e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -125,8 +126,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -136,12 +137,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -198,7 +199,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
print_hists_out(hists);
}
- root = &hists->entries;
+ root = &hists->entries.rb_root;
for (node = rb_first(root), i = 0;
node && (he = rb_entry(node, struct hist_entry, rb_node));
node = rb_next(node), i++) {
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 1c5bedab3c2c..975844807fe2 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "util/debug.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 9a9d06cb0222..af633db63f4d 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -142,7 +142,7 @@ static int find_sample(struct sample *samples, size_t nr_samples,
static int __validate_match(struct hists *hists)
{
size_t count = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -153,7 +153,7 @@ static int __validate_match(struct hists *hists)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
@@ -192,7 +192,7 @@ static int __validate_link(struct hists *hists, int idx)
size_t count = 0;
size_t count_pair = 0;
size_t count_dummy = 0;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
/*
@@ -205,7 +205,7 @@ static int __validate_link(struct hists *hists, int idx)
else
root = hists->entries_in;
- node = rb_first(root);
+ node = rb_first_cached(root);
while (node) {
struct hist_entry *he;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index faacb4f41460..0a510c524a5d 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -2,6 +2,7 @@
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
+#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
@@ -91,8 +92,8 @@ out:
static void del_hist_entries(struct hists *hists)
{
struct hist_entry *he;
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
struct rb_node *node;
if (hists__has(hists, need_collapse))
@@ -102,12 +103,12 @@ static void del_hist_entries(struct hists *hists)
root_out = &hists->entries;
- while (!RB_EMPTY_ROOT(root_out)) {
- node = rb_first(root_out);
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
he = rb_entry(node, struct hist_entry, rb_node);
- rb_erase(node, root_out);
- rb_erase(&he->rb_node_in, root_in);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
hist_entry__delete(he);
}
}
@@ -126,7 +127,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = NULL;
@@ -162,7 +163,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
@@ -228,7 +229,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "overhead,cpu";
@@ -262,7 +263,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
@@ -284,7 +285,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "comm,overhead,dso";
@@ -316,7 +317,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
@@ -358,7 +359,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "dso,sym,comm,overhead,dso";
@@ -394,7 +395,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
!strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
@@ -460,7 +461,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
int err;
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *node;
field_order = "cpu,pid,comm,dso,sym";
@@ -497,7 +498,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
}
root = &hists->entries;
- node = rb_first(root);
+ node = rb_first_cached(root);
he = rb_entry(node, struct hist_entry, rb_node);
TEST_ASSERT_VAL("Invalid hist entry",
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index e46723568516..5363a12a8b9b 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -107,7 +107,7 @@ make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
-make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1
+make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
# $(run) contains all available tests
run := make_pure
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 5ede9b561d32..ba87e6e8d18c 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -11,6 +11,7 @@
#include "tests.h"
#include "machine.h"
#include "thread_map.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "util.h"
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c531e6deb104..493ecb611540 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
- goto out_thread_map_delete;
+ goto out_cpu_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
perf_evsel__close_fd(evsel);
out_evsel_delete:
perf_evsel__delete(evsel);
+out_cpu_map_delete:
+ cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 3b97ac018d5a..4a69c07f4101 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1330,6 +1330,26 @@ static int test__checkevent_complex_name(struct perf_evlist *evlist)
return 0;
}
+static int test__sym_event_slash(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ return 0;
+}
+
+static int test__sym_event_dc(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ return 0;
+}
+
static int count_tracepoints(void)
{
struct dirent *events_ent;
@@ -1670,6 +1690,16 @@ static struct evlist_test test__events[] = {
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
.id = 53
+ },
+ {
+ .name = "cycles//u",
+ .check = test__sym_event_slash,
+ .id = 54,
+ },
+ {
+ .name = "cycles:k",
+ .check = test__sym_event_dc,
+ .id = 55,
}
};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 7bedf8608fdd..14a78898d79e 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -4,7 +4,9 @@
#include "util.h"
#include "tests.h"
#include <errno.h>
+#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
/* Simulated format definitions. */
static struct test_format {
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 0e2d00d69e6e..236ce0d6c826 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "branch.h"
#include "util.h"
#include "event.h"
#include "evsel.h"
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 5059452d27dd..8bfaa630389c 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <sys/epoll.h>
#include <util/evlist.h>
+#include <util/symbol.h>
#include <linux/filter.h>
#include "tests.h"
#include "debug.h"
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
index 6293cc660947..e37787be672b 100644
--- a/tools/perf/tests/shell/lib/probe.sh
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -4,3 +4,8 @@ skip_if_no_perf_probe() {
perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
return 0
}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
new file mode 100755
index 000000000000..5dcba800109f
--- /dev/null
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+# Zstd perf.data compression/decompression
+
+trace_file=$(mktemp /tmp/perf.data.XXX)
+perf_tool=perf
+
+skip_if_no_z_record() {
+ $perf_tool record -h 2>&1 | grep -q '\-z, \-\-compression\-level'
+}
+
+collect_z_record() {
+ echo "Collecting compressed record file:"
+ $perf_tool record -o $trace_file -g -z -F 5000 -- \
+ dd count=500 if=/dev/random of=/dev/null
+}
+
+check_compressed_stats() {
+ echo "Checking compressed events stats:"
+ $perf_tool report -i $trace_file --header --stats | \
+ grep -E "(# compressed : Zstd,)|(COMPRESSED events:)"
+}
+
+check_compressed_output() {
+ $perf_tool inject -i $trace_file -o $trace_file.decomp &&
+ $perf_tool report -i $trace_file --stdio | head -n -3 > $trace_file.comp.output &&
+ $perf_tool report -i $trace_file.decomp --stdio | head -n -3 > $trace_file.decomp.output &&
+ diff $trace_file.comp.output $trace_file.decomp.output
+}
+
+skip_if_no_z_record || exit 2
+collect_z_record && check_compressed_stats && check_compressed_output
+err=$?
+rm -f $trace_file*
+exit $err
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 50109f27ca07..147efeb6b195 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -12,6 +12,7 @@
. $(dirname $0)/lib/probe.sh
skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
. $(dirname $0)/lib/probe_vfs_getname.sh
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 7691980b7df1..f101576d1c72 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -161,9 +161,16 @@ next_pair:
continue;
}
- } else
+ } else if (mem_start == kallsyms.vmlinux_map->end) {
+ /*
+ * Ignore aliases to _etext, i.e. to the end of the kernel text area,
+ * such as __indirect_thunk_end.
+ */
+ continue;
+ } else {
pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
mem_start, sym->name);
+ }
err = -1;
}
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 637365099b7d..85f328ddf897 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,15 +1,15 @@
-libperf-y += clone.o
-libperf-y += fcntl.o
-libperf-y += flock.o
+perf-y += clone.o
+perf-y += fcntl.o
+perf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
-libperf-y += ioctl.o
+perf-y += ioctl.o
endif
-libperf-y += kcmp.o
-libperf-y += mount_flags.o
-libperf-y += pkey_alloc.o
-libperf-y += arch_prctl.o
-libperf-y += prctl.o
-libperf-y += renameat.o
-libperf-y += sockaddr.o
-libperf-y += socket.o
-libperf-y += statx.o
+perf-y += kcmp.o
+perf-y += mount_flags.o
+perf-y += pkey_alloc.o
+perf-y += arch_prctl.o
+perf-y += prctl.o
+perf-y += renameat.o
+perf-y += sockaddr.o
+perf-y += socket.o
+perf-y += statx.o
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 620350d41209..52242fa4072b 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -175,7 +175,7 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, boo
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
- unsigned int fd = syscall_arg__val(arg, 0);
+ int fd = syscall_arg__val(arg, 0);
struct file *file = thread__files_entry(arg->thread, fd);
if (file != NULL) {
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 32bac9c0d694..5f5eefcb3c74 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -1,15 +1,18 @@
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1
-if [ $# -ne 2 ] ; then
+if [ $# -ne 3 ] ; then
[ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+ linux_header_dir=tools/include/uapi/linux
header_dir=tools/include/uapi/asm-generic
arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
else
- header_dir=$1
- arch_header_dir=$2
+ linux_header_dir=$1
+ header_dir=$2
+ arch_header_dir=$3
fi
+linux_mman=${linux_header_dir}/mman.h
arch_mman=${arch_header_dir}/mman.h
# those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
(egrep $regex ${arch_mman} | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q $regex ${linux_mman} && \
+(egrep $regex ${linux_mman} | \
+ egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+ sed -r "s/$regex/\2 \1/g" | \
+ xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
(egrep $regex ${header_dir}/mman-common.h | \
egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index d66c66315987..ea68db08b8e7 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
diff --git a/tools/perf/trace/beauty/renameat.c b/tools/perf/trace/beauty/renameat.c
index 6dab340cc506..852d2e271833 100644
--- a/tools/perf/trace/beauty/renameat.c
+++ b/tools/perf/trace/beauty/renameat.c
@@ -2,7 +2,6 @@
// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
#include "trace/beauty/beauty.h"
-#include <uapi/linux/fs.h>
static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
index 6897fab40dcc..d4d10b33ba0e 100644
--- a/tools/perf/trace/beauty/waitid_options.c
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
#define P_OPTION(n) \
if (options & W##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
options &= ~W##n; \
}
diff --git a/tools/perf/trace/strace/groups/string b/tools/perf/trace/strace/groups/string
new file mode 100644
index 000000000000..c87129a3e3c4
--- /dev/null
+++ b/tools/perf/trace/strace/groups/string
@@ -0,0 +1,65 @@
+access
+acct
+add_key
+chdir
+chmod
+chown
+chroot
+creat
+delete_module
+execve
+execveat
+faccessat
+fchmodat
+fchownat
+fgetxattr
+finit_module
+fremovexattr
+fsetxattr
+futimesat
+getxattr
+inotify_add_watch
+lchown
+lgetxattr
+link
+linkat
+listxattr
+llistxattr
+lremovexattr
+lsetxattr
+lstat
+memfd_create
+mkdir
+mkdirat
+mknod
+mknodat
+mq_open
+mq_timedsend
+mq_unlink
+name_to_handle_at
+newfstatat
+open
+openat
+pivot_root
+pwrite64
+quotactl
+readlink
+readlinkat
+removexattr
+rename
+renameat
+renameat2
+request_key
+rmdir
+setxattr
+stat
+statfs
+statx
+swapoff
+swapon
+symlink
+symlinkat
+truncate
+unlink
+unlinkat
+utimensat
diff --git a/tools/perf/ui/Build b/tools/perf/ui/Build
index 0a73538c0441..3aff83c3275f 100644
--- a/tools/perf/ui/Build
+++ b/tools/perf/ui/Build
@@ -1,14 +1,14 @@
-libperf-y += setup.o
-libperf-y += helpline.o
-libperf-y += progress.o
-libperf-y += util.o
-libperf-y += hist.o
-libperf-y += stdio/hist.o
+perf-y += setup.o
+perf-y += helpline.o
+perf-y += progress.o
+perf-y += util.o
+perf-y += hist.o
+perf-y += stdio/hist.o
CFLAGS_setup.o += -DLIBDIR="BUILD_STR($(LIBDIR))"
-libperf-$(CONFIG_SLANG) += browser.o
-libperf-$(CONFIG_SLANG) += browsers/
-libperf-$(CONFIG_SLANG) += tui/
+perf-$(CONFIG_SLANG) += browser.o
+perf-$(CONFIG_SLANG) += browsers/
+perf-$(CONFIG_SLANG) += tui/
CFLAGS_browser.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 4f75561424ed..4ad37d8c7d6a 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
browser->top = browser->entries;
break;
case SEEK_CUR:
- browser->top = browser->top + browser->top_idx + offset;
+ browser->top = (char **)browser->top + offset;
break;
case SEEK_END:
- browser->top = browser->top + browser->nr_entries - 1 + offset;
+ browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
break;
default:
return;
}
+ assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
+ assert((char **)browser->top >= (char **)browser->entries);
}
unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
browser->top = browser->entries;
pos = (char **)browser->top;
- while (idx < browser->nr_entries) {
+ while (idx < browser->nr_entries &&
+ row < (unsigned)SLtt_Screen_Rows - 1) {
+ assert(pos < (char **)browser->entries + browser->nr_entries);
if (!browser->filter || !browser->filter(browser, *pos)) {
ui_browser__gotorc(browser, row, 0);
browser->write(browser, pos, row);
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index de223f5bed58..fdf86f7981ca 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -1,8 +1,9 @@
-libperf-y += annotate.o
-libperf-y += hists.o
-libperf-y += map.o
-libperf-y += scripts.o
-libperf-y += header.o
+perf-y += annotate.o
+perf-y += hists.o
+perf-y += map.o
+perf-y += scripts.o
+perf-y += header.o
+perf-y += res_sample.o
CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 82e16bf84466..98d934a36d86 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -7,6 +7,7 @@
#include "../../util/annotate.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
@@ -749,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
continue;
case 'r':
{
- script_browse(NULL);
+ script_browse(NULL, NULL);
continue;
}
case 'k':
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index d75492189acb..5aeb663dd184 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -35,7 +35,7 @@ static int list_menu__run(struct ui_browser *menu)
{
int key;
unsigned long offset;
- const char help[] =
+ static const char help[] =
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index ffac1d54a3d4..3421ecbdd3f0 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -7,10 +7,14 @@
#include <string.h>
#include <linux/rbtree.h>
#include <sys/ttydefaults.h>
+#include <linux/time64.h>
+#include "../../util/callchain.h"
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/symbol.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
@@ -27,6 +31,7 @@
#include "srcline.h"
#include "string2.h"
#include "units.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -49,7 +54,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
struct hists *hists = browser->hists;
int unfolded_rows = 0;
- for (nd = rb_first(&hists->entries);
+ for (nd = rb_first_cached(&hists->entries);
(nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd = rb_hierarchy_next(nd)) {
struct hist_entry *he =
@@ -267,7 +272,7 @@ static int hierarchy_count_rows(struct hist_browser *hb, struct hist_entry *he,
if (he->has_no_entry)
return 1;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
while (node) {
float percent;
@@ -372,7 +377,7 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
callchain__init_have_children(&he->sorted_chain);
} else {
- he->has_children = !RB_EMPTY_ROOT(&he->hroot_out);
+ he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root);
}
he->init_have_children = true;
@@ -508,7 +513,7 @@ static int hierarchy_set_folding(struct hist_browser *hb, struct hist_entry *he,
struct hist_entry *child;
int n = 0;
- for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
child = rb_entry(nd, struct hist_entry, rb_node);
percent = hist_entry__get_percent_limit(child);
if (!child->filtered && percent >= hb->min_pcnt)
@@ -566,7 +571,7 @@ __hist_browser__set_folding(struct hist_browser *browser, bool unfold)
struct rb_node *nd;
struct hist_entry *he;
- nd = rb_first(&browser->hists->entries);
+ nd = rb_first_cached(&browser->hists->entries);
while (nd) {
he = rb_entry(nd, struct hist_entry, rb_node);
@@ -1221,6 +1226,8 @@ void hist_browser__init_hpp(void)
hist_browser__hpp_color_overhead_guest_us;
perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
hist_browser__hpp_color_overhead_acc;
+
+ res_sample_init();
}
static int hist_browser__show_entry(struct hist_browser *browser,
@@ -1738,7 +1745,7 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
struct hist_browser *hb;
hb = container_of(browser, struct hist_browser, b);
- browser->top = rb_first(&hb->hists->entries);
+ browser->top = rb_first_cached(&hb->hists->entries);
}
}
@@ -2335,9 +2342,12 @@ close_file_and_continue:
}
struct popup_action {
+ unsigned long time;
struct thread *thread;
struct map_symbol ms;
int socket;
+ struct perf_evsel *evsel;
+ enum rstype rstype;
int (*fn)(struct hist_browser *browser, struct popup_action *act);
};
@@ -2524,46 +2534,137 @@ static int
do_run_script(struct hist_browser *browser __maybe_unused,
struct popup_action *act)
{
- char script_opt[64];
- memset(script_opt, 0, sizeof(script_opt));
+ char *script_opt;
+ int len;
+ int n = 0;
+ len = 100;
+ if (act->thread)
+ len += strlen(thread__comm_str(act->thread));
+ else if (act->ms.sym)
+ len += strlen(act->ms.sym->name);
+ script_opt = malloc(len);
+ if (!script_opt)
+ return -1;
+
+ script_opt[0] = 0;
if (act->thread) {
- scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+ n = scnprintf(script_opt, len, " -c %s ",
thread__comm_str(act->thread));
} else if (act->ms.sym) {
- scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+ n = scnprintf(script_opt, len, " -S %s ",
act->ms.sym->name);
}
- script_browse(script_opt);
+ if (act->time) {
+ char start[32], end[32];
+ unsigned long starttime = act->time;
+ unsigned long endtime = act->time + symbol_conf.time_quantum;
+
+ if (starttime == endtime) { /* Display 1ms as fallback */
+ starttime -= 1*NSEC_PER_MSEC;
+ endtime += 1*NSEC_PER_MSEC;
+ }
+ timestamp__scnprintf_usec(starttime, start, sizeof start);
+ timestamp__scnprintf_usec(endtime, end, sizeof end);
+ n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
+ }
+
+ script_browse(script_opt, act->evsel);
+ free(script_opt);
return 0;
}
static int
-add_script_opt(struct hist_browser *browser __maybe_unused,
+do_res_sample_script(struct hist_browser *browser __maybe_unused,
+ struct popup_action *act)
+{
+ struct hist_entry *he;
+
+ he = hist_browser__selected_entry(browser);
+ res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
+ return 0;
+}
+
+static int
+add_script_opt_2(struct hist_browser *browser __maybe_unused,
struct popup_action *act, char **optstr,
- struct thread *thread, struct symbol *sym)
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel, const char *tstr)
{
+
if (thread) {
- if (asprintf(optstr, "Run scripts for samples of thread [%s]",
- thread__comm_str(thread)) < 0)
+ if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
+ thread__comm_str(thread), tstr) < 0)
return 0;
} else if (sym) {
- if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
- sym->name) < 0)
+ if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
+ sym->name, tstr) < 0)
return 0;
} else {
- if (asprintf(optstr, "Run scripts for all samples") < 0)
+ if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
return 0;
}
act->thread = thread;
act->ms.sym = sym;
+ act->evsel = evsel;
act->fn = do_run_script;
return 1;
}
static int
+add_script_opt(struct hist_browser *browser,
+ struct popup_action *act, char **optstr,
+ struct thread *thread, struct symbol *sym,
+ struct perf_evsel *evsel)
+{
+ int n, j;
+ struct hist_entry *he;
+
+ n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
+
+ he = hist_browser__selected_entry(browser);
+ if (sort_order && strstr(sort_order, "time")) {
+ char tstr[128];
+
+ optstr++;
+ act++;
+ j = sprintf(tstr, " in ");
+ j += timestamp__scnprintf_usec(he->time, tstr + j,
+ sizeof tstr - j);
+ j += sprintf(tstr + j, "-");
+ timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
+ tstr + j, sizeof tstr - j);
+ n += add_script_opt_2(browser, act, optstr, thread, sym,
+ evsel, tstr);
+ act->time = he->time;
+ }
+ return n;
+}
+
+static int
+add_res_sample_opt(struct hist_browser *browser __maybe_unused,
+ struct popup_action *act, char **optstr,
+ struct res_sample *res_sample,
+ struct perf_evsel *evsel,
+ enum rstype type)
+{
+ if (!res_sample)
+ return 0;
+
+ if (asprintf(optstr, "Show context for individual samples %s",
+ type == A_ASM ? "with assembler" :
+ type == A_SOURCE ? "with source" : "") < 0)
+ return 0;
+
+ act->fn = do_res_sample_script;
+ act->evsel = evsel;
+ act->rstype = type;
+ return 1;
+}
+
+static int
do_switch_data(struct hist_browser *browser __maybe_unused,
struct popup_action *act __maybe_unused)
{
@@ -2649,7 +2750,7 @@ add_socket_opt(struct hist_browser *browser, struct popup_action *act,
static void hist_browser__update_nr_entries(struct hist_browser *hb)
{
u64 nr_entries = 0;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
if (hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) {
hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries;
@@ -2669,7 +2770,7 @@ static void hist_browser__update_percent_limit(struct hist_browser *hb,
double percent)
{
struct hist_entry *he;
- struct rb_node *nd = rb_first(&hb->hists->entries);
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
u64 total = hists__total_period(hb->hists);
u64 min_callchain_hits = total * (percent / 100);
@@ -2748,7 +2849,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"S Zoom into current Processor Socket\n" \
/* help messages are sorted by lexical order of the hotkey */
- const char report_help[] = HIST_BROWSER_HELP_COMMON
+ static const char report_help[] = HIST_BROWSER_HELP_COMMON
"i Show header information\n"
"P Print histograms to perf.hist.N\n"
"r Run available scripts\n"
@@ -2756,7 +2857,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name";
- const char top_help[] = HIST_BROWSER_HELP_COMMON
+ static const char top_help[] = HIST_BROWSER_HELP_COMMON
"P Print histograms to perf.hist.N\n"
"t Zoom into current Thread\n"
"V Verbose (DSO names in callchains, etc)\n"
@@ -3028,7 +3129,7 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- thread, NULL);
+ thread, NULL, evsel);
}
/*
* Note that browser->selection != NULL
@@ -3043,11 +3144,24 @@ skip_annotation:
nr_options += add_script_opt(browser,
&actions[nr_options],
&options[nr_options],
- NULL, browser->selection->sym);
+ NULL, browser->selection->sym,
+ evsel);
}
}
nr_options += add_script_opt(browser, &actions[nr_options],
- &options[nr_options], NULL, NULL);
+ &options[nr_options], NULL, NULL, evsel);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_NORMAL);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_ASM);
+ nr_options += add_res_sample_opt(browser, &actions[nr_options],
+ &options[nr_options],
+ hist_browser__selected_entry(browser)->res_samples,
+ evsel, A_SOURCE);
nr_options += add_switch_opt(browser, &actions[nr_options],
&options[nr_options]);
skip_scripting:
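
A minimal standalone sketch (not part of the patch) of the append-at-offset pattern that do_run_script() above switches to: allocate a buffer sized from the thread/symbol name, then append each option segment at the running snprintf offset. The names "firefox" and the timestamp strings are illustrative stand-ins for thread__comm_str() and timestamp__scnprintf_usec().

/* sketch: building a perf script option string by appending segments */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *comm  = "firefox";        /* stand-in for thread__comm_str() */
	const char *start = "1234.567890";    /* stand-in for timestamp__scnprintf_usec() */
	const char *end   = "1234.667890";
	int len = 100 + strlen(comm);         /* headroom plus the variable-length name */
	char *opt = malloc(len);
	int n;

	if (!opt)
		return 1;
	opt[0] = 0;
	n = snprintf(opt, len, " -c %s ", comm);              /* thread filter */
	n += snprintf(opt + n, len - n, " --time %s,%s", start, end); /* time range */
	printf("perf script%s\n", opt);
	free(opt);
	return 0;
}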
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index 5b8b8c637686..c70d9337405b 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include "../../util/util.h"
#include "../../util/debug.h"
+#include "../../util/map.h"
#include "../../util/symbol.h"
#include "../browser.h"
#include "../helpline.h"
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644
index 000000000000..c0dd73176d42
--- /dev/null
+++ b/tools/perf/ui/browsers/res_sample.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Display a menu with individual samples to browse with perf script */
+#include "util.h"
+#include "hist.h"
+#include "evsel.h"
+#include "hists.h"
+#include "sort.h"
+#include "config.h"
+#include "time-utils.h"
+#include <linux/time64.h>
+
+static u64 context_len = 10 * NSEC_PER_MSEC;
+
+static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
+{
+ if (!strcmp(var, "samples.context"))
+ return perf_config_u64(&context_len, var, value);
+ return 0;
+}
+
+void res_sample_init(void)
+{
+ perf_config(res_sample_config, NULL);
+}
+
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype)
+{
+ char **names;
+ int i, n;
+ int choice;
+ char *cmd;
+ char pbuf[256], tidbuf[32], cpubuf[32];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ char trange[128], tsample[64];
+ struct res_sample *r;
+ char extra_format[256];
+
+ names = calloc(num_res, sizeof(char *));
+ if (!names)
+ return -1;
+ for (i = 0; i < num_res; i++) {
+ char tbuf[64];
+
+ timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
+ if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
+ res_samples[i].cpu, res_samples[i].tid) < 0) {
+ while (--i >= 0)
+ free(names[i]);
+ free(names);
+ return -1;
+ }
+ }
+ choice = ui__popup_menu(num_res, names);
+ for (i = 0; i < num_res; i++)
+ free(names[i]);
+ free(names);
+
+ if (choice < 0 || choice >= num_res)
+ return -1;
+ r = &res_samples[choice];
+
+ n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
+ trange[n++] = ',';
+ timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
+
+ timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
+
+ attr_to_script(extra_format, &evsel->attr);
+
+ if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
+ perf,
+ input_name ? "-i " : "",
+ input_name ? input_name : "",
+ trange,
+ r->cpu >= 0 ? "--cpu " : "",
+ r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
+ r->tid ? "--tid " : "",
+ r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
+ extra_format,
+ rstype == A_ASM ? "-F +insn --xed" :
+ rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
+ symbol_conf.inline_name ? "--inline" : "",
+ "--show-lost-events ",
+ r->tid ? "--show-switch-events --show-task-events " : "",
+ tsample) < 0)
+ return -1;
+ run_script(cmd);
+ free(cmd);
+ return 0;
+}
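
A small standalone sketch (not part of the patch) of the time window res_sample_browse() above hands to perf script: the selected sample's timestamp plus/minus the samples.context window (10 ms by default, overridable via perfconfig). fmt_ns() is a hypothetical stand-in for timestamp__scnprintf_nsec(); the timestamp value is made up.

/* sketch: computing the --time range around one resolved sample */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static void fmt_ns(unsigned long long t, char *buf, int size)
{
	/* seconds.nanoseconds, matching the --ns formatting used above */
	snprintf(buf, size, "%llu.%09llu", t / NSEC_PER_SEC, t % NSEC_PER_SEC);
}

int main(void)
{
	unsigned long long sample = 12345678901234ULL; /* sample time in ns (illustrative) */
	unsigned long long ctx    = 10 * 1000000ULL;   /* default samples.context: 10 ms */
	char start[32], end[32];

	fmt_ns(sample - ctx, start, sizeof(start));
	fmt_ns(sample + ctx, end, sizeof(end));
	printf("--time %s,%s --ns\n", start, end);
	return 0;
}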
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 90a32ac69e76..27cf3ab88d13 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,34 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-#include <elf.h>
-#include <inttypes.h>
-#include <sys/ttydefaults.h>
-#include <string.h>
#include "../../util/sort.h"
#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/debug.h"
#include "../../util/symbol.h"
#include "../browser.h"
-#include "../helpline.h"
#include "../libslang.h"
-
-/* 2048 lines should be enough for a script output */
-#define MAX_LINES 2048
-
-/* 160 bytes for one output line */
-#define AVERAGE_LINE_LEN 160
-
-struct script_line {
- struct list_head node;
- char line[AVERAGE_LINE_LEN];
-};
-
-struct perf_script_browser {
- struct ui_browser b;
- struct list_head entries;
- const char *script_name;
- int nr_lines;
-};
+#include "config.h"
#define SCRIPT_NAMELEN 128
#define SCRIPT_MAX_NO 64
@@ -40,149 +18,169 @@ struct perf_script_browser {
*/
#define SCRIPT_FULLPATH_LEN 256
+struct script_config {
+ const char **names;
+ char **paths;
+ int index;
+ const char *perf;
+ char extra_format[256];
+};
+
+void attr_to_script(char *extra_format, struct perf_event_attr *attr)
+{
+ extra_format[0] = 0;
+ if (attr->read_format & PERF_FORMAT_GROUP)
+ strcat(extra_format, " -F +metric");
+ if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ strcat(extra_format, " -F +brstackinsn --xed");
+ if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+ strcat(extra_format, " -F +iregs");
+ if (attr->sample_type & PERF_SAMPLE_REGS_USER)
+ strcat(extra_format, " -F +uregs");
+ if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
+ strcat(extra_format, " -F +phys_addr");
+}
+
+static int add_script_option(const char *name, const char *opt,
+ struct script_config *c)
+{
+ c->names[c->index] = name;
+ if (asprintf(&c->paths[c->index],
+ "%s script %s -F +metric %s %s",
+ c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
+static int scripts_config(const char *var, const char *value, void *data)
+{
+ struct script_config *c = data;
+
+ if (!strstarts(var, "scripts."))
+ return -1;
+ if (c->index >= SCRIPT_MAX_NO)
+ return -1;
+ c->names[c->index] = strdup(var + 7);
+ if (!c->names[c->index])
+ return -1;
+ if (asprintf(&c->paths[c->index], "%s %s", value,
+ c->extra_format) < 0)
+ return -1;
+ c->index++;
+ return 0;
+}
+
/*
* When success, will copy the full path of the selected script
* into the buffer pointed by script_name, and return 0.
* Return -1 on failure.
*/
-static int list_scripts(char *script_name)
+static int list_scripts(char *script_name, bool *custom,
+ struct perf_evsel *evsel)
{
- char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO];
- int i, num, choice, ret = -1;
+ char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
+ int i, num, choice;
+ int ret = 0;
+ int max_std, custom_perf;
+ char pbuf[256];
+ const char *perf = perf_exe(pbuf, sizeof pbuf);
+ struct script_config scriptc = {
+ .names = (const char **)names,
+ .paths = paths,
+ .perf = perf
+ };
+
+ script_name[0] = 0;
/* Preset the script name to SCRIPT_NAMELEN */
buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
if (!buf)
- return ret;
+ return -1;
- for (i = 0; i < SCRIPT_MAX_NO; i++) {
- names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
+ if (evsel)
+ attr_to_script(scriptc.extra_format, &evsel->attr);
+ add_script_option("Show individual samples", "", &scriptc);
+ add_script_option("Show individual samples with assembler", "-F +insn --xed",
+ &scriptc);
+ add_script_option("Show individual samples with source", "-F +srcline,+srccode",
+ &scriptc);
+ perf_config(scripts_config, &scriptc);
+ custom_perf = scriptc.index;
+ add_script_option("Show samples with custom perf script arguments", "", &scriptc);
+ i = scriptc.index;
+ max_std = i;
+
+ for (; i < SCRIPT_MAX_NO; i++) {
+ names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
paths[i] = names[i] + SCRIPT_NAMELEN;
}
- num = find_scripts(names, paths);
- if (num > 0) {
- choice = ui__popup_menu(num, names);
- if (choice < num && choice >= 0) {
- strcpy(script_name, paths[choice]);
- ret = 0;
- }
+ num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
+ SCRIPT_FULLPATH_LEN);
+ if (num < 0)
+ num = 0;
+ choice = ui__popup_menu(num + max_std, (char * const *)names);
+ if (choice < 0) {
+ ret = -1;
+ goto out;
}
+ if (choice == custom_perf) {
+ char script_args[50];
+ int key = ui_browser__input_window("perf script command",
+ "Enter perf script command line (without perf script prefix)",
+ script_args, "", 0);
+ if (key != K_ENTER)
+ return -1;
+ sprintf(script_name, "%s script %s", perf, script_args);
+ } else if (choice < num + max_std) {
+ strcpy(script_name, paths[choice]);
+ }
+ *custom = choice >= max_std;
+out:
free(buf);
+ for (i = 0; i < max_std; i++)
+ free(paths[i]);
return ret;
}
-static void script_browser__write(struct ui_browser *browser,
- void *entry, int row)
+void run_script(char *cmd)
{
- struct script_line *sline = list_entry(entry, struct script_line, node);
- bool current_entry = ui_browser__is_current_entry(browser, row);
-
- ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
- HE_COLORSET_NORMAL);
-
- ui_browser__write_nstring(browser, sline->line, browser->width);
+ pr_debug("Running %s\n", cmd);
+ SLang_reset_tty();
+ if (system(cmd) < 0)
+ pr_warning("Cannot run %s\n", cmd);
+ /*
+ * SLang doesn't seem to reset the whole terminal, so be more
+ * forceful to get back to the original state.
+ */
+ printf("\033[c\033[H\033[J");
+ fflush(stdout);
+ SLang_init_tty(0, 0, 0);
+ SLsmg_refresh();
}
-static int script_browser__run(struct perf_script_browser *browser)
+int script_browse(const char *script_opt, struct perf_evsel *evsel)
{
- int key;
+ char *cmd, script_name[SCRIPT_FULLPATH_LEN];
+ bool custom = false;
- if (ui_browser__show(&browser->b, browser->script_name,
- "Press ESC to exit") < 0)
+ memset(script_name, 0, SCRIPT_FULLPATH_LEN);
+ if (list_scripts(script_name, &custom, evsel))
return -1;
- while (1) {
- key = ui_browser__run(&browser->b, 0);
-
- /* We can add some special key handling here if needed */
- break;
- }
-
- ui_browser__hide(&browser->b);
- return key;
-}
-
-
-int script_browse(const char *script_opt)
-{
- char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
- char *line = NULL;
- size_t len = 0;
- ssize_t retlen;
- int ret = -1, nr_entries = 0;
- FILE *fp;
- void *buf;
- struct script_line *sline;
-
- struct perf_script_browser script = {
- .b = {
- .refresh = ui_browser__list_head_refresh,
- .seek = ui_browser__list_head_seek,
- .write = script_browser__write,
- },
- .script_name = script_name,
- };
-
- INIT_LIST_HEAD(&script.entries);
-
- /* Save each line of the output in one struct script_line object. */
- buf = zalloc((sizeof(*sline)) * MAX_LINES);
- if (!buf)
+ if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
+ custom ? "perf script -s " : "",
+ script_name,
+ script_opt ? script_opt : "",
+ input_name ? "-i " : "",
+ input_name ? input_name : "") < 0)
return -1;
- sline = buf;
-
- memset(script_name, 0, SCRIPT_FULLPATH_LEN);
- if (list_scripts(script_name))
- goto exit;
-
- sprintf(cmd, "perf script -s %s ", script_name);
- if (script_opt)
- strcat(cmd, script_opt);
+ run_script(cmd);
+ free(cmd);
- if (input_name) {
- strcat(cmd, " -i ");
- strcat(cmd, input_name);
- }
-
- strcat(cmd, " 2>&1");
-
- fp = popen(cmd, "r");
- if (!fp)
- goto exit;
-
- while ((retlen = getline(&line, &len, fp)) != -1) {
- strncpy(sline->line, line, AVERAGE_LINE_LEN);
-
- /* If one output line is very large, just cut it short */
- if (retlen >= AVERAGE_LINE_LEN) {
- sline->line[AVERAGE_LINE_LEN - 1] = '\0';
- sline->line[AVERAGE_LINE_LEN - 2] = '\n';
- }
- list_add_tail(&sline->node, &script.entries);
-
- if (script.b.width < retlen)
- script.b.width = retlen;
-
- if (nr_entries++ >= MAX_LINES - 1)
- break;
- sline++;
- }
-
- if (script.b.width > AVERAGE_LINE_LEN)
- script.b.width = AVERAGE_LINE_LEN;
-
- free(line);
- pclose(fp);
-
- script.nr_lines = nr_entries;
- script.b.nr_entries = nr_entries;
- script.b.entries = &script.entries;
-
- ret = script_browser__run(&script);
-exit:
- free(buf);
- return ret;
+ return 0;
}
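
A standalone sketch (not part of the patch) of what the new attr_to_script() above does: translate perf_event_attr sample/read flags into extra "perf script -F" fields so the generated command shows whatever the event actually recorded. attr_flags_to_fields() is a hypothetical helper covering a subset of the mappings; it assumes the Linux uapi <linux/perf_event.h> constants are available.

/* sketch: mapping attr flags onto perf script -F fields */
#include <stdio.h>
#include <linux/perf_event.h>

static int attr_flags_to_fields(unsigned long long sample_type,
				unsigned long long read_format,
				char *out, size_t size)
{
	int n = 0;

	out[0] = 0;
	if (read_format & PERF_FORMAT_GROUP)
		n += snprintf(out + n, size - n, " -F +metric");
	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		n += snprintf(out + n, size - n, " -F +brstackinsn --xed");
	if (sample_type & PERF_SAMPLE_REGS_INTR)
		n += snprintf(out + n, size - n, " -F +iregs");
	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		n += snprintf(out + n, size - n, " -F +phys_addr");
	return n;
}

int main(void)
{
	char buf[256];

	attr_flags_to_fields(PERF_SAMPLE_BRANCH_STACK | PERF_SAMPLE_REGS_INTR, 0,
			     buf, sizeof(buf));
	printf("perf script%s\n", buf);
	return 0;
}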
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 48428c9acd89..df49c9ba1785 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include "gtk.h"
+#include "util/sort.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
+#include "util/map.h"
+#include "util/symbol.h"
#include "ui/helpline.h"
#include <inttypes.h>
#include <signal.h>
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 4ab663ec3e5e..0c08890f006a 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "../evlist.h"
#include "../cache.h"
+#include "../callchain.h"
#include "../evsel.h"
#include "../sort.h"
#include "../hist.h"
@@ -353,7 +354,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
g_object_unref(GTK_TREE_MODEL(store));
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
GtkTreeIter iter;
u64 total = hists__total_period(h->hists);
@@ -401,7 +402,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
}
static void perf_gtk__add_hierarchy_entries(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
GtkTreeStore *store,
GtkTreeIter *parent,
struct perf_hpp *hpp,
@@ -415,7 +416,7 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
u64 total = hists__total_period(hists);
int size;
- for (node = rb_first(root); node; node = rb_next(node)) {
+ for (node = rb_first_cached(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
char *bf;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index fe3dfaa64a91..412d6f1626e3 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -3,6 +3,7 @@
#include <math.h>
#include <linux/compiler.h>
+#include "../util/callchain.h"
#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 74c4ae1f0a05..a60f2993d390 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -2,8 +2,12 @@
#include <stdio.h>
#include <linux/string.h>
+#include "../../util/callchain.h"
#include "../../util/util.h"
#include "../../util/hist.h"
+#include "../../util/map.h"
+#include "../../util/map_groups.h"
+#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
@@ -788,7 +792,8 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
indent = hists__overhead_width(hists) + 4;
- for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
+ for (nd = rb_first_cached(&hists->entries); nd;
+ nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
float percent;
diff --git a/tools/perf/ui/tui/Build b/tools/perf/ui/tui/Build
index 9e4c6ca41a9f..f916df33a1a7 100644
--- a/tools/perf/ui/tui/Build
+++ b/tools/perf/ui/tui/Build
@@ -1,4 +1,4 @@
-libperf-y += setup.o
-libperf-y += util.o
-libperf-y += helpline.o
-libperf-y += progress.o
+perf-y += setup.o
+perf-y += util.o
+perf-y += helpline.o
+perf-y += progress.o
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index af72be7f5b3b..6d5bbc8b589b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -1,158 +1,166 @@
-libperf-y += annotate.o
-libperf-y += block-range.o
-libperf-y += build-id.o
-libperf-y += config.o
-libperf-y += ctype.o
-libperf-y += db-export.o
-libperf-y += env.o
-libperf-y += event.o
-libperf-y += evlist.o
-libperf-y += evsel.o
-libperf-y += evsel_fprintf.o
-libperf-y += find_bit.o
-libperf-y += get_current_dir_name.o
-libperf-y += kallsyms.o
-libperf-y += levenshtein.o
-libperf-y += llvm-utils.o
-libperf-y += mmap.o
-libperf-y += memswap.o
-libperf-y += parse-events.o
-libperf-y += perf_regs.o
-libperf-y += path.o
-libperf-y += print_binary.o
-libperf-y += rbtree.o
-libperf-y += libstring.o
-libperf-y += bitmap.o
-libperf-y += hweight.o
-libperf-y += smt.o
-libperf-y += strbuf.o
-libperf-y += string.o
-libperf-y += strlist.o
-libperf-y += strfilter.o
-libperf-y += top.o
-libperf-y += usage.o
-libperf-y += dso.o
-libperf-y += symbol.o
-libperf-y += symbol_fprintf.o
-libperf-y += color.o
-libperf-y += metricgroup.o
-libperf-y += header.o
-libperf-y += callchain.o
-libperf-y += values.o
-libperf-y += debug.o
-libperf-y += machine.o
-libperf-y += map.o
-libperf-y += pstack.o
-libperf-y += session.o
-libperf-$(CONFIG_TRACE) += syscalltbl.o
-libperf-y += ordered-events.o
-libperf-y += namespaces.o
-libperf-y += comm.o
-libperf-y += thread.o
-libperf-y += thread_map.o
-libperf-y += trace-event-parse.o
-libperf-y += parse-events-flex.o
-libperf-y += parse-events-bison.o
-libperf-y += pmu.o
-libperf-y += pmu-flex.o
-libperf-y += pmu-bison.o
-libperf-y += trace-event-read.o
-libperf-y += trace-event-info.o
-libperf-y += trace-event-scripting.o
-libperf-y += trace-event.o
-libperf-y += svghelper.o
-libperf-y += sort.o
-libperf-y += hist.o
-libperf-y += util.o
-libperf-y += xyarray.o
-libperf-y += cpumap.o
-libperf-y += cgroup.o
-libperf-y += target.o
-libperf-y += rblist.o
-libperf-y += intlist.o
-libperf-y += vdso.o
-libperf-y += counts.o
-libperf-y += stat.o
-libperf-y += stat-shadow.o
-libperf-y += stat-display.o
-libperf-y += record.o
-libperf-y += srcline.o
-libperf-y += srccode.o
-libperf-y += data.o
-libperf-y += tsc.o
-libperf-y += cloexec.o
-libperf-y += call-path.o
-libperf-y += rwsem.o
-libperf-y += thread-stack.o
-libperf-$(CONFIG_AUXTRACE) += auxtrace.o
-libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
-libperf-$(CONFIG_AUXTRACE) += intel-pt.o
-libperf-$(CONFIG_AUXTRACE) += intel-bts.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe.o
-libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
-libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
+perf-y += annotate.o
+perf-y += block-range.o
+perf-y += build-id.o
+perf-y += config.o
+perf-y += ctype.o
+perf-y += db-export.o
+perf-y += env.o
+perf-y += event.o
+perf-y += evlist.o
+perf-y += evsel.o
+perf-y += evsel_fprintf.o
+perf-y += find_bit.o
+perf-y += get_current_dir_name.o
+perf-y += kallsyms.o
+perf-y += levenshtein.o
+perf-y += llvm-utils.o
+perf-y += mmap.o
+perf-y += memswap.o
+perf-y += parse-events.o
+perf-y += perf_regs.o
+perf-y += path.o
+perf-y += print_binary.o
+perf-y += rbtree.o
+perf-y += libstring.o
+perf-y += bitmap.o
+perf-y += hweight.o
+perf-y += smt.o
+perf-y += strbuf.o
+perf-y += string.o
+perf-y += strlist.o
+perf-y += strfilter.o
+perf-y += top.o
+perf-y += usage.o
+perf-y += dso.o
+perf-y += symbol.o
+perf-y += symbol_fprintf.o
+perf-y += color.o
+perf-y += color_config.o
+perf-y += metricgroup.o
+perf-y += header.o
+perf-y += callchain.o
+perf-y += values.o
+perf-y += debug.o
+perf-y += machine.o
+perf-y += map.o
+perf-y += pstack.o
+perf-y += session.o
+perf-y += sample-raw.o
+perf-y += s390-sample-raw.o
+perf-$(CONFIG_TRACE) += syscalltbl.o
+perf-y += ordered-events.o
+perf-y += namespaces.o
+perf-y += comm.o
+perf-y += thread.o
+perf-y += thread_map.o
+perf-y += trace-event-parse.o
+perf-y += parse-events-flex.o
+perf-y += parse-events-bison.o
+perf-y += pmu.o
+perf-y += pmu-flex.o
+perf-y += pmu-bison.o
+perf-y += trace-event-read.o
+perf-y += trace-event-info.o
+perf-y += trace-event-scripting.o
+perf-y += trace-event.o
+perf-y += svghelper.o
+perf-y += sort.o
+perf-y += hist.o
+perf-y += util.o
+perf-y += xyarray.o
+perf-y += cpumap.o
+perf-y += cputopo.o
+perf-y += cgroup.o
+perf-y += target.o
+perf-y += rblist.o
+perf-y += intlist.o
+perf-y += vdso.o
+perf-y += counts.o
+perf-y += stat.o
+perf-y += stat-shadow.o
+perf-y += stat-display.o
+perf-y += record.o
+perf-y += srcline.o
+perf-y += srccode.o
+perf-y += data.o
+perf-y += tsc.o
+perf-y += cloexec.o
+perf-y += call-path.o
+perf-y += rwsem.o
+perf-y += thread-stack.o
+perf-$(CONFIG_AUXTRACE) += auxtrace.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+perf-$(CONFIG_AUXTRACE) += intel-pt.o
+perf-$(CONFIG_AUXTRACE) += intel-bts.o
+perf-$(CONFIG_AUXTRACE) += arm-spe.o
+perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
-libperf-$(CONFIG_AUXTRACE) += cs-etm.o
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+perf-$(CONFIG_AUXTRACE) += cs-etm.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
endif
-libperf-y += parse-branch-options.o
-libperf-y += dump-insn.o
-libperf-y += parse-regs-options.o
-libperf-y += term.o
-libperf-y += help-unknown-cmd.o
-libperf-y += mem-events.o
-libperf-y += vsprintf.o
-libperf-y += drv_configs.o
-libperf-y += units.o
-libperf-y += time-utils.o
-libperf-y += expr-bison.o
-libperf-y += branch.o
-libperf-y += mem2node.o
-
-libperf-$(CONFIG_LIBBPF) += bpf-loader.o
-libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
-libperf-$(CONFIG_LIBELF) += symbol-elf.o
-libperf-$(CONFIG_LIBELF) += probe-file.o
-libperf-$(CONFIG_LIBELF) += probe-event.o
+perf-y += parse-branch-options.o
+perf-y += dump-insn.o
+perf-y += parse-regs-options.o
+perf-y += term.o
+perf-y += help-unknown-cmd.o
+perf-y += mem-events.o
+perf-y += vsprintf.o
+perf-y += units.o
+perf-y += time-utils.o
+perf-y += expr-bison.o
+perf-y += branch.o
+perf-y += mem2node.o
+
+perf-$(CONFIG_LIBBPF) += bpf-loader.o
+perf-$(CONFIG_LIBBPF) += bpf_map.o
+perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
+perf-$(CONFIG_LIBELF) += symbol-elf.o
+perf-$(CONFIG_LIBELF) += probe-file.o
+perf-$(CONFIG_LIBELF) += probe-event.o
ifndef CONFIG_LIBELF
-libperf-y += symbol-minimal.o
+perf-y += symbol-minimal.o
endif
ifndef CONFIG_SETNS
-libperf-y += setns.o
+perf-y += setns.o
endif
-libperf-$(CONFIG_DWARF) += probe-finder.o
-libperf-$(CONFIG_DWARF) += dwarf-aux.o
-libperf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_DWARF) += probe-finder.o
+perf-$(CONFIG_DWARF) += dwarf-aux.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
-libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
-libperf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
-libperf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o
+perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
+perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
-libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
-libperf-y += scripting-engines/
+perf-y += scripting-engines/
-libperf-$(CONFIG_ZLIB) += zlib.o
-libperf-$(CONFIG_LZMA) += lzma.o
-libperf-y += demangle-java.o
-libperf-y += demangle-rust.o
+perf-$(CONFIG_ZLIB) += zlib.o
+perf-$(CONFIG_LZMA) += lzma.o
+perf-$(CONFIG_ZSTD) += zstd.o
+
+perf-y += demangle-java.o
+perf-y += demangle-rust.o
ifdef CONFIG_JITDUMP
-libperf-$(CONFIG_LIBELF) += jitdump.o
-libperf-$(CONFIG_LIBELF) += genelf.o
-libperf-$(CONFIG_DWARF) += genelf_debug.o
+perf-$(CONFIG_LIBELF) += jitdump.o
+perf-$(CONFIG_LIBELF) += genelf.o
+perf-$(CONFIG_DWARF) += genelf_debug.o
endif
-libperf-y += perf-hooks.o
+perf-y += perf-hooks.o
+
+perf-$(CONFIG_LIBBPF) += bpf-event.o
-libperf-$(CONFIG_CXX) += c++/
+perf-$(CONFIG_CXX) += c++/
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 70de8f6b3aee..79db038b56f2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1,14 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from builtin-annotate.c, see those files for further
* copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <errno.h>
#include <inttypes.h>
+#include <libgen.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
@@ -16,12 +20,14 @@
#include "color.h"
#include "config.h"
#include "cache.h"
+#include "map.h"
#include "symbol.h"
#include "units.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "evlist.h"
+#include "bpf-event.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
@@ -29,6 +35,7 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <bpf/libbpf.h>
/* FIXME: For the HE_COLORSET */
#include "ui/browser.h"
@@ -196,18 +203,18 @@ static void ins__delete(struct ins_operands *ops)
}
static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
}
int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ins->ops->scnprintf)
- return ins->ops->scnprintf(ins, bf, size, ops);
+ return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
}
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
@@ -271,18 +278,18 @@ indirect_call:
}
static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
if (ops->target.sym)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
if (ops->target.addr == 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.name)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
- return scnprintf(bf, size, "%-6s *%" PRIx64, ins->name, ops->target.addr);
+ return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
}
static struct ins_ops call_ops = {
@@ -386,15 +393,15 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
}
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
const char *c;
if (!ops->target.addr || ops->target.offset < 0)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
if (ops->target.outside && ops->target.sym != NULL)
- return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
c = validate_comma(c, ops);
@@ -413,7 +420,7 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
c++;
}
- return scnprintf(bf, size, "%-6s %.*s%" PRIx64,
+ return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
ins->name, c ? c - ops->raw : 0, ops->raw,
ops->target.offset);
}
@@ -481,16 +488,16 @@ out_free_ops:
}
static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
int printed;
if (ops->locked.ins.ops == NULL)
- return ins__raw_scnprintf(ins, bf, size, ops);
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
- printed = scnprintf(bf, size, "%-6s ", ins->name);
+ printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
- size - printed, ops->locked.ops);
+ size - printed, ops->locked.ops, max_ins_name);
}
static void lock__delete(struct ins_operands *ops)
@@ -562,9 +569,9 @@ out_free_source:
}
static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s,%s", ins->name,
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
ops->source.name ?: ops->source.raw,
ops->target.name ?: ops->target.raw);
}
@@ -602,9 +609,9 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
}
static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops)
+ struct ins_operands *ops, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s %s", ins->name,
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
ops->target.name ?: ops->target.raw);
}
@@ -614,9 +621,9 @@ static struct ins_ops dec_ops = {
};
static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
- struct ins_operands *ops __maybe_unused)
+ struct ins_operands *ops __maybe_unused, int max_ins_name)
{
- return scnprintf(bf, size, "%-6s", "nop");
+ return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
}
static struct ins_ops nop_ops = {
@@ -1013,7 +1020,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
/* Hide data when there are too many overlaps. */
- if (ch->reset >= 0x7fff || ch->reset >= ch->num / 2)
+ if (ch->reset >= 0x7fff)
return;
for (offset = start; offset <= end; offset++) {
@@ -1230,12 +1237,12 @@ void disasm_line__free(struct disasm_line *dl)
annotation_line__delete(&dl->al);
}
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
{
if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-6s %s", dl->ins.name, dl->ops.raw);
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
- return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
}
static void annotation_line__add(struct annotation_line *al, struct list_head *head)
@@ -1613,6 +1620,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
" --vmlinux vmlinux\n", build_id_msg ?: "");
}
break;
+ case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+ scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+ break;
default:
scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
break;
@@ -1672,6 +1682,156 @@ fallback:
return 0;
}
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotation_options *opts = args->options;
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct disassemble_info info;
+ struct dso *dso = map->dso;
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ int ret = -1;
+ char *buf;
+ bfd *bfdf;
+ FILE *s;
+
+ if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return -1;
+
+ pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ assert(bfdf);
+ assert(bfd_check_format(bfdf, bfd_object));
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s)
+ goto out;
+ init_disassemble_info(&info, s,
+ (fprintf_ftype) fprintf);
+
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
+ dso->bpf_prog.id);
+ if (!info_node)
+ goto out;
+ info_linear = info_node->info_linear;
+ sub_id = dso->bpf_prog.sub_id;
+
+ info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso->bpf_prog.env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ assert(disassemble);
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!opts->hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
struct annotation_options *opts = args->options;
@@ -1699,7 +1859,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
pr_debug("annotating [%p] %30s : [%p] %30s\n",
dso, dso->long_name, sym, sym->name);
- if (dso__is_kcore(dso)) {
+ if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+ return symbol__disassemble_bpf(sym, args);
+ } else if (dso__is_kcore(dso)) {
kce.kcore_filename = symfs_filename;
kce.addr = map__rip_2objdump(map, sym->start);
kce.offs = sym->start;
@@ -1889,6 +2051,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
struct annotation_options *options,
struct arch **parch)
{
+ struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
.privsize = privsize,
.evsel = evsel,
@@ -1919,6 +2082,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
args.ms.map = map;
args.ms.sym = sym;
+ notes->start = map__rip_2objdump(map, sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2410,12 +2574,30 @@ static inline int width_jumps(int n)
return 1;
}
+static int annotation__max_ins_name(struct annotation *notes)
+{
+ int max_name = 0, len;
+ struct annotation_line *al;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+
+ len = strlen(disasm_line(al)->ins.name);
+ if (max_name < len)
+ max_name = len;
+ }
+
+ return max_name;
+}
+
void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
notes->widths.addr = notes->widths.target =
notes->widths.min_addr = hex_width(symbol__size(sym));
notes->widths.max_addr = hex_width(sym->end);
notes->widths.jumps = width_jumps(notes->max_jump_sources);
+ notes->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
@@ -2579,7 +2761,7 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
+ disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
@@ -2794,8 +2976,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
symbol__calc_percent(sym, evsel);
- notes->start = map__rip_2objdump(map, sym->start);
-
annotation__set_offsets(notes, size);
annotation__mark_jump_targets(notes, sym);
annotation__compute_ipc(notes, size);
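
A short standalone sketch (not part of the patch) of the formatting change made throughout annotate.c above: the fixed "%-6s" mnemonic column becomes "%-*s" with a per-function max_ins_name width, so operands stay aligned even when an instruction name is longer than six characters. The instruction strings below are illustrative.

/* sketch: width-computed mnemonic column instead of a fixed %-6s */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ins[] = { "mov", "vpermilps", "add" };
	const char *ops[] = { "%rax,%rbx", "$0x1,%xmm0,%xmm1", "$1,%rcx" };
	int i, max = 0;

	for (i = 0; i < 3; i++) {
		int len = (int)strlen(ins[i]);

		if (len > max)
			max = len;     /* widest mnemonic sets the column width */
	}
	for (i = 0; i < 3; i++)
		printf("%-*s %s\n", max, ins[i], ops[i]);
	return 0;
}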
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index fb6463730ba4..5bc0cf655d37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -4,16 +4,24 @@
#include <stdbool.h>
#include <stdint.h>
+#include <stdio.h>
#include <linux/types.h>
-#include "symbol.h"
-#include "hist.h"
-#include "sort.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <pthread.h>
#include <asm/bug.h>
+#include "symbol_conf.h"
+struct hist_browser_timer;
+struct hist_entry;
struct ins_ops;
+struct map;
+struct map_symbol;
+struct addr_map_symbol;
+struct option;
+struct perf_sample;
+struct perf_evsel;
+struct symbol;
struct ins {
const char *name;
@@ -51,14 +59,14 @@ struct ins_ops {
void (*free)(struct ins_operands *ops);
int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops);
+ struct ins_operands *ops, int max_ins_name);
};
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
bool ins__is_lock(const struct ins *ins);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_name);
bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__IPC_WIDTH 6
@@ -211,7 +219,7 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
struct perf_evsel *evsel,
bool show_freq);
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
@@ -281,6 +289,7 @@ struct annotation {
u8 target;
u8 min_addr;
u8 max_addr;
+ u8 max_ins_name;
} widths;
bool have_cycles;
struct annotated_source *src;
@@ -360,6 +369,7 @@ enum symbol_disassemble_errno {
__SYMBOL_ANNOTATE_ERRNO__START = -10000,
SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
+ SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
__SYMBOL_ANNOTATE_ERRNO__END,
};
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644
index 000000000000..448cbb6b8d7e
--- /dev/null
+++ b/tools/perf/util/archinsn.h
@@ -0,0 +1,12 @@
+#ifndef INSN_H
+#define INSN_H 1
+
+struct perf_sample;
+struct machine;
+struct thread;
+
+void arch_fetch_insn(struct perf_sample *sample,
+ struct thread *thread,
+ struct machine *machine);
+
+#endif
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index f69961c4a4f3..cfdbf65f1e02 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* auxtrace.c: AUX area trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <inttypes.h>
@@ -27,6 +18,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <linux/time64.h>
#include <sys/param.h>
#include <stdlib.h>
@@ -41,6 +33,7 @@
#include "pmu.h"
#include "evsel.h"
#include "cpumap.h"
+#include "symbol.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"
@@ -857,7 +850,7 @@ void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg)
+ const char *msg, u64 timestamp)
{
size_t size;
@@ -869,7 +862,9 @@ void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
auxtrace_error->cpu = cpu;
auxtrace_error->pid = pid;
auxtrace_error->tid = tid;
+ auxtrace_error->fmt = 1;
auxtrace_error->ip = ip;
+ auxtrace_error->time = timestamp;
strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
@@ -1006,7 +1001,8 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
}
if (!str) {
- itrace_synth_opts__set_default(synth_opts, false);
+ itrace_synth_opts__set_default(synth_opts,
+ synth_opts->default_no_sample);
return 0;
}
@@ -1159,12 +1155,27 @@ static const char *auxtrace_error_name(int type)
size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
struct auxtrace_error_event *e = &event->auxtrace_error;
+ unsigned long long nsecs = e->time;
+ const char *msg = e->msg;
int ret;
ret = fprintf(fp, " %s error type %u",
auxtrace_error_name(e->type), e->type);
+
+ if (e->fmt && nsecs) {
+ unsigned long secs = nsecs / NSEC_PER_SEC;
+
+ nsecs -= secs * NSEC_PER_SEC;
+ ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
+ } else {
+ ret += fprintf(fp, " time 0");
+ }
+
+ if (!e->fmt)
+ msg = (const char *)&e->time;
+
ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
- e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
+ e->cpu, e->pid, e->tid, e->ip, e->code, msg);
return ret;
}
@@ -1278,9 +1289,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
@@ -1899,7 +1910,8 @@ static struct dso *load_dso(const char *name)
if (!map)
return NULL;
- map__load(map);
+ if (map__load(map) < 0)
+ pr_err("File '%s' not found or has no symbols.\n", name);
dso = dso__get(map->dso);
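
A tiny standalone sketch (not part of the patch) of the padding computation changed in __auxtrace_mmap__read() above: the literal 8 is replaced by PERF_AUXTRACE_RECORD_ALIGNMENT (defined in auxtrace.h below), and the arithmetic rounds the AUX data size up to that alignment. The sizes below are illustrative.

/* sketch: rounding an AUX data size up to the record alignment */
#include <stdio.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

int main(void)
{
	unsigned long sizes[] = { 24, 29, 32, 33 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long padding = sizes[i] & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);

		if (padding)
			padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
		printf("size %lu -> pad %lu\n", sizes[i], padding);
	}
	return 0;
}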
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 8e50f96d4b23..d62f60eb5df4 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* auxtrace.h: AUX area trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef __PERF_AUXTRACE_H
@@ -40,6 +31,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
@@ -516,7 +510,7 @@ void auxtrace_index__free(struct list_head *head);
void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
int code, int cpu, pid_t pid, pid_t tid, u64 ip,
- const char *msg);
+ const char *msg, u64 timestamp);
int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
struct perf_tool *tool,
diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c
index f1451c987eec..1be432657501 100644
--- a/tools/perf/util/block-range.c
+++ b/tools/perf/util/block-range.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "block-range.h"
#include "annotate.h"
+#include <assert.h>
+#include <stdlib.h>
struct {
struct rb_root root;
diff --git a/tools/perf/util/block-range.h b/tools/perf/util/block-range.h
index a5ba719d69fb..ec0fb534bf56 100644
--- a/tools/perf/util/block-range.h
+++ b/tools/perf/util/block-range.h
@@ -2,7 +2,11 @@
#ifndef __PERF_BLOCK_RANGE_H
#define __PERF_BLOCK_RANGE_H
-#include "symbol.h"
+#include <stdbool.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct symbol;
/*
* struct block_range - non-overlapping parts of basic blocks
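
A standalone sketch (not part of the patch) of the ksym naming scheme used by the new bpf-event.c below: synthesize_bpf_prog_name() builds names of the form "bpf_prog_<tag>[_<func>]" by hex-formatting the 8-byte program tag with snprintf_hex(). The tag bytes here are made up for illustration.

/* sketch: hex-formatting a BPF program tag into a ksym name */
#include <stdio.h>

int main(void)
{
	unsigned char tag[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };
	char name[64];
	int n, i;

	n = snprintf(name, sizeof(name), "bpf_prog_");
	for (i = 0; i < 8; i++)
		n += snprintf(name + n, sizeof(name) - n, "%02x", tag[i]);
	printf("%s\n", name);
	return 0;
}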
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
new file mode 100644
index 000000000000..2a4a0da35632
--- /dev/null
+++ b/tools/perf/util/bpf-event.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <stdlib.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+#include "bpf-event.h"
+#include "debug.h"
+#include "symbol.h"
+#include "machine.h"
+#include "env.h"
+#include "session.h"
+#include "map.h"
+#include "evlist.h"
+
+#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+
+static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
+{
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i < len; i++)
+ ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
+ return ret;
+}
+
+static int machine__process_bpf_event_load(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = machine->env;
+ int id = event->bpf_event.id;
+ unsigned int i;
+
+ /* perf-record, no need to handle bpf-event */
+ if (env == NULL)
+ return 0;
+
+ info_node = perf_env__find_bpf_prog_info(env, id);
+ if (!info_node)
+ return 0;
+ info_linear = info_node->info_linear;
+
+ for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
+ u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
+ u64 addr = addrs[i];
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, addr);
+
+ if (map) {
+ map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
+ map->dso->bpf_prog.id = id;
+ map->dso->bpf_prog.sub_id = i;
+ map->dso->bpf_prog.env = env;
+ }
+ }
+ return 0;
+}
+
+int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ if (dump_trace)
+ perf_event__fprintf_bpf_event(event, stdout);
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ return machine__process_bpf_event_load(machine, event, sample);
+
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+ return 0;
+}
+
+static int perf_env__fetch_btf(struct perf_env *env,
+ u32 btf_id,
+ struct btf *btf)
+{
+ struct btf_node *node;
+ u32 data_size;
+ const void *data;
+
+ data = btf__get_raw_data(btf, &data_size);
+
+ node = malloc(data_size + sizeof(struct btf_node));
+ if (!node)
+ return -1;
+
+ node->id = btf_id;
+ node->data_size = data_size;
+ memcpy(node->data, data, data_size);
+
+ perf_env__insert_btf(env, node);
+ return 0;
+}
+
+static int synthesize_bpf_prog_name(char *buf, int size,
+ struct bpf_prog_info *info,
+ struct btf *btf,
+ u32 sub_id)
+{
+ u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
+ void *func_infos = (void *)(uintptr_t)(info->func_info);
+ u32 sub_prog_cnt = info->nr_jited_ksyms;
+ const struct bpf_func_info *finfo;
+ const char *short_name = NULL;
+ const struct btf_type *t;
+ int name_len;
+
+ name_len = snprintf(buf, size, "bpf_prog_");
+ name_len += snprintf_hex(buf + name_len, size - name_len,
+ prog_tags[sub_id], BPF_TAG_SIZE);
+ if (btf) {
+ finfo = func_infos + sub_id * info->func_info_rec_size;
+ t = btf__type_by_id(btf, finfo->type_id);
+ short_name = btf__name_by_offset(btf, t->name_off);
+ } else if (sub_id == 0 && sub_prog_cnt == 1) {
+ /* no subprog */
+ if (info->name[0])
+ short_name = info->name;
+ } else
+ short_name = "F";
+ if (short_name)
+ name_len += snprintf(buf + name_len, size - name_len,
+ "_%s", short_name);
+ return name_len;
+}
+
+/*
+ * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
+ * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
+ * one PERF_RECORD_KSYMBOL is generated for each sub program.
+ *
+ * Returns:
+ * 0 for success;
+ * -1 for failures;
+ * -2 for lack of kernel support.
+ */
+static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ int fd,
+ union perf_event *event,
+ struct record_opts *opts)
+{
+ struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
+ struct bpf_event *bpf_event = &event->bpf_event;
+ struct bpf_prog_info_linear *info_linear;
+ struct perf_tool *tool = session->tool;
+ struct bpf_prog_info_node *info_node;
+ struct bpf_prog_info *info;
+ struct btf *btf = NULL;
+ struct perf_env *env;
+ u32 sub_prog_cnt, i;
+ int err = 0;
+ u64 arrays;
+
+ /*
+ * for perf-record and perf-report use header.env;
+ * otherwise, use global perf_env.
+ */
+ env = session->data ? &session->header.env : &perf_env;
+
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ info_linear = NULL;
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ return -1;
+ }
+
+ if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+ pr_debug("%s: the kernel is too old, aborting\n", __func__);
+ return -2;
+ }
+
+ info = &info_linear->info;
+
+ /* number of ksyms, func_lengths, and tags should match */
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
+ return -1;
+
+ /* check BTF func info support */
+ if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
+ /* btf func info number should be same as sub_prog_cnt */
+ if (sub_prog_cnt != info->nr_func_info) {
+ pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
+ err = -1;
+ goto out;
+ }
+ if (btf__get_from_id(info->btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+ err = -1;
+ btf = NULL;
+ goto out;
+ }
+ perf_env__fetch_btf(env, info->btf_id, btf);
+ }
+
+ /* Synthesize PERF_RECORD_KSYMBOL */
+ for (i = 0; i < sub_prog_cnt; i++) {
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ int name_len;
+
+ *ksymbol_event = (struct ksymbol_event){
+ .header = {
+ .type = PERF_RECORD_KSYMBOL,
+ .size = offsetof(struct ksymbol_event, name),
+ },
+ .addr = prog_addrs[i],
+ .len = prog_lens[i],
+ .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
+ .flags = 0,
+ };
+
+ name_len = synthesize_bpf_prog_name(ksymbol_event->name,
+ KSYM_NAME_LEN, info, btf, i);
+ ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
+ sizeof(u64));
+
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+ if (!opts->no_bpf_event) {
+ /* Synthesize PERF_RECORD_BPF_EVENT */
+ *bpf_event = (struct bpf_event){
+ .header = {
+ .type = PERF_RECORD_BPF_EVENT,
+ .size = sizeof(struct bpf_event),
+ },
+ .type = PERF_BPF_EVENT_PROG_LOAD,
+ .flags = 0,
+ .id = info->id,
+ };
+ memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
+ memset((void *)event + event->header.size, 0, machine->id_hdr_size);
+ event->header.size += machine->id_hdr_size;
+
+ /* save bpf_prog_info to env */
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node) {
+ err = -1;
+ goto out;
+ }
+
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ info_linear = NULL;
+
+ /*
+ * process after saving bpf_prog_info to env, so that
+ * required information is ready for look up
+ */
+ err = perf_tool__process_synth_event(tool, event,
+ machine, process);
+ }
+
+out:
+ free(info_linear);
+ free(btf);
+ return err ? -1 : 0;
+}
+
+int perf_event__synthesize_bpf_events(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts)
+{
+ union perf_event *event;
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
+ if (!event)
+ return -1;
+ while (true) {
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ pr_debug("%s: can't get next program: %s%s\n",
+ __func__, strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ /* don't report error on old kernel or EPERM */
+ err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
+ break;
+ }
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ pr_debug("%s: failed to get fd for prog_id %u\n",
+ __func__, id);
+ continue;
+ }
+
+ err = perf_event__synthesize_one_bpf_prog(session, process,
+ machine, fd,
+ event, opts);
+ close(fd);
+ if (err) {
+ /* do not return error for old kernel */
+ if (err == -2)
+ err = 0;
+ break;
+ }
+ }
+ free(event);
+ return err;
+}
+
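+/*
+ * Save bpf_prog_info (and BTF, when present) for the given program id into
+ * the perf_env, so later processing can look them up.
+ */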
+static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct btf *btf = NULL;
+ u64 arrays;
+ u32 btf_id;
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0)
+ return;
+
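+ /* Request the same optional info arrays as when synthesizing events. */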
+ arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
+ arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
+
+ info_linear = bpf_program__get_prog_info_linear(fd, arrays);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ goto out;
+ }
+
+ btf_id = info_linear->info.btf_id;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (info_node) {
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ } else
+ free(info_linear);
+
+ if (btf_id == 0)
+ goto out;
+
+ if (btf__get_from_id(btf_id, &btf)) {
+ pr_debug("%s: failed to get BTF of id %u, aborting\n",
+ __func__, btf_id);
+ goto out;
+ }
+ perf_env__fetch_btf(env, btf_id, btf);
+
+out:
+ free(btf);
+ close(fd);
+}
+
+static int bpf_event__sb_cb(union perf_event *event, void *data)
+{
+ struct perf_env *env = data;
+
+ if (event->header.type != PERF_RECORD_BPF_EVENT)
+ return -1;
+
+ switch (event->bpf_event.type) {
+ case PERF_BPF_EVENT_PROG_LOAD:
+ perf_env__add_bpf_info(env, event->bpf_event.id);
+
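+ /* fall through */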
+ case PERF_BPF_EVENT_PROG_UNLOAD:
+ /*
+ * Do not free bpf_prog_info and btf of the program here,
+ * as annotation still needs them. They will be freed at
+ * the end of the session.
+ */
+ break;
+ default:
+ pr_debug("unexpected bpf_event type of %d\n",
+ event->bpf_event.type);
+ break;
+ }
+
+ return 0;
+}
+
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .sample_id_all = 1,
+ .watermark = 1,
+ .bpf_event = 1,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+
+ /*
+ * Older gcc versions don't support designated initializers, like above,
+ * for unnamed union members, such as the following:
+ */
+ attr.wakeup_watermark = 1;
+
+ return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
+}
+
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp)
+{
+ __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
+ __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
+ char name[KSYM_NAME_LEN];
+ struct btf *btf = NULL;
+ u32 sub_prog_cnt, i;
+
+ sub_prog_cnt = info->nr_jited_ksyms;
+ if (sub_prog_cnt != info->nr_prog_tags ||
+ sub_prog_cnt != info->nr_jited_func_lens)
+ return;
+
+ if (info->btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(env, info->btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ if (sub_prog_cnt == 1) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
+ fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
+ info->id, name, prog_addrs[0], prog_lens[0]);
+ return;
+ }
+
+ fprintf(fp, "# bpf_prog_info %u:\n", info->id);
+ for (i = 0; i < sub_prog_cnt; i++) {
+ synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
+
+ fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
+ i, name, prog_addrs[i], prog_lens[i]);
+ }
+}
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
new file mode 100644
index 000000000000..04c33b3bfe28
--- /dev/null
+++ b/tools/perf/util/bpf-event.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BPF_EVENT_H
+#define __PERF_BPF_EVENT_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <pthread.h>
+#include <api/fd/array.h>
+#include "event.h"
+#include <stdio.h>
+
+struct machine;
+union perf_event;
+struct perf_env;
+struct perf_sample;
+struct record_opts;
+struct evlist;
+struct target;
+
+struct bpf_prog_info_node {
+ struct bpf_prog_info_linear *info_linear;
+ struct rb_node rb_node;
+};
+
+struct btf_node {
+ struct rb_node rb_node;
+ u32 id;
+ u32 data_size;
+ char data[];
+};
+
+#ifdef HAVE_LIBBPF_SUPPORT
+int machine__process_bpf_event(struct machine *machine, union perf_event *event,
+ struct perf_sample *sample);
+
+int perf_event__synthesize_bpf_events(struct perf_session *session,
+ perf_event__handler_t process,
+ struct machine *machine,
+ struct record_opts *opts);
+int bpf_event__add_sb_event(struct perf_evlist **evlist,
+ struct perf_env *env);
+void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
+ struct perf_env *env,
+ FILE *fp);
+#else
+static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused)
+{
+ return 0;
+}
+
+static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
+ perf_event__handler_t process __maybe_unused,
+ struct machine *machine __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return 0;
+}
+
+static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
+ struct perf_env *env __maybe_unused)
+{
+ return 0;
+}
+
+static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
+ struct perf_env *env __maybe_unused,
+ FILE *fp __maybe_unused)
+{
+
+}
+#endif // HAVE_LIBBPF_SUPPORT
+#endif
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2f3eb6d293ee..251d9ea6252f 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "perf.h"
#include "debug.h"
+#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
@@ -24,22 +25,12 @@
#include "llvm-utils.h"
#include "c++/clang-c.h"
-#define DEFINE_PRINT_FN(name, level) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- ret = veprintf(level, verbose, pr_fmt(fmt), args);\
- va_end(args); \
- return ret; \
+static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
+ const char *fmt, va_list args)
+{
+ return veprintf(1, verbose, pr_fmt(fmt), args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
-
struct bpf_prog_priv {
bool is_tp;
char *sys_name;
@@ -59,9 +50,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -79,9 +68,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
struct bpf_object *obj;
if (!libbpf_initialized) {
- libbpf_set_print(libbpf_warning,
- libbpf_info,
- libbpf_debug);
+ libbpf_set_print(libbpf_perf_print);
libbpf_initialized = true;
}
@@ -1503,7 +1490,7 @@ apply_obj_config_object(struct bpf_object *obj)
struct bpf_map *map;
int err;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
err = apply_obj_config_map(map);
if (err)
return err;
@@ -1527,7 +1514,7 @@ int bpf__apply_obj_config(void)
#define bpf__for_each_map(pos, obj, objtmp) \
bpf_object__for_each_safe(obj, objtmp) \
- bpf_map__for_each(pos, obj)
+ bpf_object__for_each_map(pos, obj)
#define bpf__for_each_map_named(pos, obj, objtmp, name) \
bpf__for_each_map(pos, obj, objtmp) \
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index 62d245a90e1d..3f46856e3330 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -8,11 +8,7 @@
#include <linux/compiler.h>
#include <linux/err.h>
-#include <string.h>
#include <bpf/libbpf.h>
-#include "probe-event.h"
-#include "evlist.h"
-#include "debug.h"
enum bpf_loader_errno {
__BPF_LOADER_ERRNO__START = __LIBBPF_ERRNO__START - 100,
@@ -44,6 +40,7 @@ enum bpf_loader_errno {
};
struct perf_evsel;
+struct perf_evlist;
struct bpf_object;
struct parse_events_term;
#define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@@ -87,6 +84,8 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size);
#else
#include <errno.h>
+#include <string.h>
+#include "debug.h"
static inline struct bpf_object *
bpf__prepare_load(const char *filename __maybe_unused,
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
new file mode 100644
index 000000000000..eb853ca67cf4
--- /dev/null
+++ b/tools/perf/util/bpf_map.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "util/bpf_map.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+{
+ return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+}
+
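+/* Per-CPU maps return one value slot per possible CPU, each padded to 8 bytes. */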
+static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+{
+ if (bpf_map_def__is_per_cpu(def))
+ return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+
+ return malloc(def->value_size);
+}
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
+{
+ const struct bpf_map_def *def = bpf_map__def(map);
+ void *prev_key = NULL, *key, *value;
+ int fd = bpf_map__fd(map), err;
+ int printed = 0;
+
+ if (fd < 0)
+ return fd;
+
+ if (IS_ERR(def))
+ return PTR_ERR(def);
+
+ err = -ENOMEM;
+ key = malloc(def->key_size);
+ if (key == NULL)
+ goto out;
+
+ value = bpf_map_def__alloc_value(def);
+ if (value == NULL)
+ goto out_free_key;
+
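+ /* Walk every key, treating each value as a boolean for printing. */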
+ while ((err = bpf_map_get_next_key(fd, prev_key, key) == 0)) {
+ int intkey = *(int *)key;
+
+ if (!bpf_map_lookup_elem(fd, key, value)) {
+ bool boolval = *(bool *)value;
+ if (boolval)
+ printed += fprintf(fp, "[%d] = %d,\n", intkey, boolval);
+ } else {
+ printed += fprintf(fp, "[%d] = ERROR,\n", intkey);
+ }
+
+ prev_key = key;
+ }
+
+ if (err == ENOENT)
+ err = printed;
+
+ free(value);
+out_free_key:
+ free(key);
+out:
+ return err;
+}
diff --git a/tools/perf/util/bpf_map.h b/tools/perf/util/bpf_map.h
new file mode 100644
index 000000000000..d6abd5e47af8
--- /dev/null
+++ b/tools/perf/util/bpf_map.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+#ifndef __PERF_BPF_MAP_H
+#define __PERF_BPF_MAP_H 1
+
+#include <stdio.h>
+#include <linux/compiler.h>
+struct bpf_map;
+
+#ifdef HAVE_LIBBPF_SUPPORT
+
+int bpf_map__fprintf(struct bpf_map *map, FILE *fp);
+
+#else
+
+static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused)
+{
+ return 0;
+}
+
+#endif // HAVE_LIBBPF_SUPPORT
+
+#endif // __PERF_BPF_MAP_H
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 1e3c7c5cdc63..64f96b79f1d7 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -1,8 +1,31 @@
#ifndef _PERF_BRANCH_H
#define _PERF_BRANCH_H 1
+#include <stdio.h>
#include <stdint.h>
-#include "../perf.h"
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+struct branch_flags {
+ u64 mispred:1;
+ u64 predicted:1;
+ u64 in_tx:1;
+ u64 abort:1;
+ u64 cycles:16;
+ u64 type:4;
+ u64 reserved:40;
+};
+
+struct branch_entry {
+ u64 from;
+ u64 to;
+ struct branch_flags flags;
+};
+
+struct branch_stack {
+ u64 nr;
+ struct branch_entry entries[0];
+};
struct branch_type_stat {
bool branch_to;
@@ -13,8 +36,6 @@ struct branch_type_stat {
u64 cross_2m;
};
-struct branch_flags;
-
void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
u64 from, u64 to);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 04b1d53e4bf9..0c5517a8d0b7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -15,6 +15,8 @@
#include <sys/types.h>
#include "build-id.h"
#include "event.h"
+#include "namespaces.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include <linux/kernel.h>
@@ -183,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
+/* The caller is responsible for freeing the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
@@ -363,7 +366,8 @@ int perf_session__write_buildid_table(struct perf_session *session,
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__write_buildid_table(pos, fd);
if (err)
@@ -396,7 +400,8 @@ int dsos__hit_all(struct perf_session *session)
if (err)
return err;
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
err = machine__hit_all_dsos(pos);
@@ -849,7 +854,8 @@ int perf_session__cache_build_ids(struct perf_session *session)
ret = machine__cache_build_ids(&session->machines.host);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__cache_build_ids(pos);
}
@@ -866,7 +872,8 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
struct rb_node *nd;
bool ret = machine__read_build_ids(&session->machines.host, with_hits);
- for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret |= machine__read_build_ids(pos, with_hits);
}
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index f0c565164a97..93668f38f1ed 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -6,9 +6,10 @@
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
#include "tool.h"
-#include "namespaces.h"
#include <linux/types.h>
+struct nsinfo;
+
extern struct perf_tool build_id__mark_dso_hit_ops;
struct dso;
struct feat_fd;
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
index 988fef1b11d7..613ecfd76527 100644
--- a/tools/perf/util/c++/Build
+++ b/tools/perf/util/c++/Build
@@ -1,2 +1,2 @@
-libperf-$(CONFIG_CLANGLLVM) += clang.o
-libperf-$(CONFIG_CLANGLLVM) += clang-test.o
+perf-$(CONFIG_CLANGLLVM) += clang.o
+perf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index 39c0004f2886..fc361c3f8570 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -156,7 +156,7 @@ getBPFObjectFromModule(llvm::Module *Module)
#endif
if (NotAdded) {
llvm::errs() << "TargetMachine can't emit a file of this type\n";
- return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
}
PM.run(*Module);
diff --git a/tools/perf/util/call-path.c b/tools/perf/util/call-path.c
index 904a17052e38..c5b90300304d 100644
--- a/tools/perf/util/call-path.c
+++ b/tools/perf/util/call-path.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* call-path.h: Manipulate a tree data structure containing function call paths
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/rbtree.h>
diff --git a/tools/perf/util/call-path.h b/tools/perf/util/call-path.h
index 477f6d03b659..6b3229106f16 100644
--- a/tools/perf/util/call-path.h
+++ b/tools/perf/util/call-path.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* call-path.h: Manipulate a tree data structure containing function call paths
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef __PERF_CALL_PATH_H
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index dc2212e12184..abb608b09269 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -23,8 +23,10 @@
#include "util.h"
#include "sort.h"
#include "machine.h"
+#include "map.h"
#include "callchain.h"
#include "branch.h"
+#include "symbol.h"
#define CALLCHAIN_PARAM_DEFAULT \
.mode = CHAIN_GRAPH_ABS, \
@@ -1577,3 +1579,18 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
return rc;
}
+
+/*
+ * Initialize a cursor before adding entries inside, but keep
+ * the previously allocated entries as a cache.
+ */
+void callchain_cursor_reset(struct callchain_cursor *cursor)
+{
+ struct callchain_cursor_node *node;
+
+ cursor->nr = 0;
+ cursor->last = &cursor->first;
+
+ for (node = cursor->first; node != NULL; node = node->next)
+ map__zput(node->map);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 99d38ac019b8..80e056a3d882 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -2,14 +2,14 @@
#ifndef __PERF_CALLCHAIN_H
#define __PERF_CALLCHAIN_H
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
-#include "map.h"
-#include "symbol.h"
+#include "map_symbol.h"
#include "branch.h"
+struct map;
+
#define HELP_PAD "\t\t\t\t"
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
@@ -188,20 +188,7 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-/*
- * Initialize a cursor before adding entries inside, but keep
- * the previously allocated entries as a cache.
- */
-static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
-{
- struct callchain_cursor_node *node;
-
- cursor->nr = 0;
- cursor->last = &cursor->first;
-
- for (node = cursor->first; node != NULL; node = node->next)
- map__zput(node->map);
-}
+void callchain_cursor_reset(struct callchain_cursor *cursor);
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym,
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index ca0fff6272be..06f48312c5ed 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -7,7 +7,6 @@
#include "asm/bug.h"
#include "debug.h"
#include <unistd.h>
-#include <asm/unistd.h>
#include <sys/syscall.h>
static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 39e628b8938e..39b8c4ec4e2e 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include "cache.h"
-#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include "color.h"
@@ -10,44 +9,6 @@
int perf_use_color_default = -1;
-int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
-{
- if (value) {
- if (!strcasecmp(value, "never"))
- return 0;
- if (!strcasecmp(value, "always"))
- return 1;
- if (!strcasecmp(value, "auto"))
- goto auto_color;
- }
-
- /* Missing or explicit false to turn off colorization */
- if (!perf_config_bool(var, value))
- return 0;
-
- /* any normal truth value defaults to 'auto' */
- auto_color:
- if (stdout_is_tty < 0)
- stdout_is_tty = isatty(1);
- if (stdout_is_tty || pager_in_use()) {
- char *term = getenv("TERM");
- if (term && strcmp(term, "dumb"))
- return 1;
- }
- return 0;
-}
-
-int perf_color_default_config(const char *var, const char *value,
- void *cb __maybe_unused)
-{
- if (!strcmp(var, "color.ui")) {
- perf_use_color_default = perf_config_colorbool(var, value, -1);
- return 0;
- }
-
- return 0;
-}
-
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 22777b1812ee..01f7bed21c9b 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -3,6 +3,7 @@
#define __PERF_COLOR_H
#include <stdio.h>
+#include <stdarg.h>
/* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */
#define COLOR_MAXLEN 24
diff --git a/tools/perf/util/color_config.c b/tools/perf/util/color_config.c
new file mode 100644
index 000000000000..817dc56e7e95
--- /dev/null
+++ b/tools/perf/util/color_config.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include "cache.h"
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include "color.h"
+#include <math.h>
+#include <unistd.h>
+
+int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
+{
+ if (value) {
+ if (!strcasecmp(value, "never"))
+ return 0;
+ if (!strcasecmp(value, "always"))
+ return 1;
+ if (!strcasecmp(value, "auto"))
+ goto auto_color;
+ }
+
+ /* Missing or explicit false to turn off colorization */
+ if (!perf_config_bool(var, value))
+ return 0;
+
+ /* any normal truth value defaults to 'auto' */
+ auto_color:
+ if (stdout_is_tty < 0)
+ stdout_is_tty = isatty(1);
+ if (stdout_is_tty || pager_in_use()) {
+ char *term = getenv("TERM");
+ if (term && strcmp(term, "dumb"))
+ return 1;
+ }
+ return 0;
+}
+
+int perf_color_default_config(const char *var, const char *value,
+ void *cb __maybe_unused)
+{
+ if (!strcmp(var, "color.ui")) {
+ perf_use_color_default = perf_config_colorbool(var, value, -1);
+ return 0;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 31279a7bd919..1066de92af12 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
+#include <linux/rbtree.h>
#include "rwsem.h"
struct comm_str {
diff --git a/tools/perf/util/comm.h b/tools/perf/util/comm.h
index 3e5c438fe85e..f35d8fbfa2dd 100644
--- a/tools/perf/util/comm.h
+++ b/tools/perf/util/comm.h
@@ -2,9 +2,9 @@
#ifndef __PERF_COMM_H
#define __PERF_COMM_H
-#include "../perf.h"
-#include <linux/rbtree.h>
#include <linux/list.h>
+#include <linux/types.h>
+#include <stdbool.h>
struct comm_str;
diff --git a/tools/perf/util/compress.h b/tools/perf/util/compress.h
index 892e92e7e7fc..0cd3369af2a4 100644
--- a/tools/perf/util/compress.h
+++ b/tools/perf/util/compress.h
@@ -2,6 +2,11 @@
#ifndef PERF_COMPRESS_H
#define PERF_COMPRESS_H
+#include <stdbool.h>
+#ifdef HAVE_ZSTD_SUPPORT
+#include <zstd.h>
+#endif
+
#ifdef HAVE_ZLIB_SUPPORT
int gzip_decompress_to_file(const char *input, int output_fd);
bool gzip_is_compressed(const char *input);
@@ -12,4 +17,52 @@ int lzma_decompress_to_file(const char *input, int output_fd);
bool lzma_is_compressed(const char *input);
#endif
+struct zstd_data {
+#ifdef HAVE_ZSTD_SUPPORT
+ ZSTD_CStream *cstream;
+ ZSTD_DStream *dstream;
+#endif
+};
+
+#ifdef HAVE_ZSTD_SUPPORT
+
+int zstd_init(struct zstd_data *data, int level);
+int zstd_fini(struct zstd_data *data);
+
+size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ void *src, size_t src_size, size_t max_record_size,
+ size_t process_header(void *record, size_t increment));
+
+size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
+ void *dst, size_t dst_size);
+#else /* !HAVE_ZSTD_SUPPORT */
+
+static inline int zstd_init(struct zstd_data *data __maybe_unused, int level __maybe_unused)
+{
+ return 0;
+}
+
+static inline int zstd_fini(struct zstd_data *data __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+size_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused,
+ void *dst __maybe_unused, size_t dst_size __maybe_unused,
+ void *src __maybe_unused, size_t src_size __maybe_unused,
+ size_t max_record_size __maybe_unused,
+ size_t process_header(void *record, size_t increment) __maybe_unused)
+{
+ return 0;
+}
+
+static inline size_t zstd_decompress_stream(struct zstd_data *data __maybe_unused, void *src __maybe_unused,
+ size_t src_size __maybe_unused, void *dst __maybe_unused,
+ size_t dst_size __maybe_unused)
+{
+ return 0;
+}
+#endif
+
#endif /* PERF_COMPRESS_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 1ea8f898f1a1..7e3c1b60120c 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -13,6 +13,7 @@
#include <sys/param.h>
#include "util.h"
#include "cache.h"
+#include "callchain.h"
#include <subcmd/exec-cmd.h>
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
@@ -632,11 +633,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
- return ret;
out_free:
free(key);
- return -1;
+ return ret;
}
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/cpu-set-sched.h b/tools/perf/util/cpu-set-sched.h
new file mode 100644
index 000000000000..8cf4e40d322a
--- /dev/null
+++ b/tools/perf/util/cpu-set-sched.h
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: LGPL-2.1
+// Definitions taken from glibc for use with older systems, same licensing.
+#ifndef _CPU_SET_SCHED_PERF_H
+#define _CPU_SET_SCHED_PERF_H
+
+#include <features.h>
+#include <sched.h>
+
+#ifndef CPU_EQUAL
+#ifndef __CPU_EQUAL_S
+#if __GNUC_PREREQ (2, 91)
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__builtin_memcmp (cpusetp1, cpusetp2, setsize) == 0)
+#else
+# define __CPU_EQUAL_S(setsize, cpusetp1, cpusetp2) \
+ (__extension__ \
+ ({ const __cpu_mask *__arr1 = (cpusetp1)->__bits; \
+ const __cpu_mask *__arr2 = (cpusetp2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ if (__arr1[__i] != __arr2[__i]) \
+ break; \
+ __i == __imax; }))
+#endif
+#endif // __CPU_EQUAL_S
+
+#define CPU_EQUAL(cpusetp1, cpusetp2) \
+ __CPU_EQUAL_S (sizeof (cpu_set_t), cpusetp1, cpusetp2)
+#endif // CPU_EQUAL
+
+#ifndef CPU_OR
+#ifndef __CPU_OP_S
+#define __CPU_OP_S(setsize, destset, srcset1, srcset2, op) \
+ (__extension__ \
+ ({ cpu_set_t *__dest = (destset); \
+ const __cpu_mask *__arr1 = (srcset1)->__bits; \
+ const __cpu_mask *__arr2 = (srcset2)->__bits; \
+ size_t __imax = (setsize) / sizeof (__cpu_mask); \
+ size_t __i; \
+ for (__i = 0; __i < __imax; ++__i) \
+ ((__cpu_mask *) __dest->__bits)[__i] = __arr1[__i] op __arr2[__i]; \
+ __dest; }))
+#endif // __CPU_OP_S
+
+#define CPU_OR(destset, srcset1, srcset2) \
+ __CPU_OP_S (sizeof (cpu_set_t), destset, srcset1, srcset2, |)
+#endif // CPU_OR
+
+#endif // _CPU_SET_SCHED_PERF_H
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 383674f448fc..0b599229bc7e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -681,7 +681,7 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
#undef COMMA
- pr_debug("cpumask list: %s\n", buf);
+ pr_debug2("cpumask list: %s\n", buf);
return ret;
}
@@ -730,3 +730,13 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
buf[size - 1] = '\0';
return ptr - buf;
}
+
+const struct cpu_map *cpu_map__online(void) /* thread unsafe */
+{
+ static const struct cpu_map *online = NULL;
+
+ if (!online)
+ online = cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+
+ return online;
+}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index ed8999d1a640..f00ce624b9f7 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -29,6 +29,7 @@ int cpu_map__get_core_id(int cpu);
int cpu_map__get_core(struct cpu_map *map, int idx, void *data);
int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep);
+const struct cpu_map *cpu_map__online(void); /* thread unsafe */
struct cpu_map *cpu_map__get(struct cpu_map *map);
void cpu_map__put(struct cpu_map *map);
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
new file mode 100644
index 000000000000..ece0710249d4
--- /dev/null
+++ b/tools/perf/util/cputopo.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/param.h>
+#include <inttypes.h>
+#include <api/fs/fs.h>
+
+#include "cputopo.h"
+#include "cpumap.h"
+#include "util.h"
+#include "env.h"
+
+
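+/* sysfs paths for CPU core/thread siblings and NUMA node information. */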
+#define CORE_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
+#define THRD_SIB_FMT \
+ "%s/devices/system/cpu/cpu%d/topology/thread_siblings_list"
+#define NODE_ONLINE_FMT \
+ "%s/devices/system/node/online"
+#define NODE_MEMINFO_FMT \
+ "%s/devices/system/node/node%d/meminfo"
+#define NODE_CPULIST_FMT \
+ "%s/devices/system/node/node%d/cpulist"
+
+static int build_cpu_topology(struct cpu_topology *tp, int cpu)
+{
+ FILE *fp;
+ char filename[MAXPATHLEN];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ ssize_t sret;
+ u32 i = 0;
+ int ret = -1;
+
+ scnprintf(filename, MAXPATHLEN, CORE_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto try_threads;
+
+ sret = getline(&buf, &len, fp);
+ fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
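+ /* Only add this core sibling list if it has not been recorded yet. */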
+ for (i = 0; i < tp->core_sib; i++) {
+ if (!strcmp(buf, tp->core_siblings[i]))
+ break;
+ }
+ if (i == tp->core_sib) {
+ tp->core_siblings[i] = buf;
+ tp->core_sib++;
+ buf = NULL;
+ len = 0;
+ }
+ ret = 0;
+
+try_threads:
+ scnprintf(filename, MAXPATHLEN, THRD_SIB_FMT,
+ sysfs__mountpoint(), cpu);
+ fp = fopen(filename, "r");
+ if (!fp)
+ goto done;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto done;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < tp->thread_sib; i++) {
+ if (!strcmp(buf, tp->thread_siblings[i]))
+ break;
+ }
+ if (i == tp->thread_sib) {
+ tp->thread_siblings[i] = buf;
+ tp->thread_sib++;
+ buf = NULL;
+ }
+ ret = 0;
+done:
+ if (fp)
+ fclose(fp);
+ free(buf);
+ return ret;
+}
+
+void cpu_topology__delete(struct cpu_topology *tp)
+{
+ u32 i;
+
+ if (!tp)
+ return;
+
+ for (i = 0 ; i < tp->core_sib; i++)
+ zfree(&tp->core_siblings[i]);
+
+ for (i = 0 ; i < tp->thread_sib; i++)
+ zfree(&tp->thread_siblings[i]);
+
+ free(tp);
+}
+
+struct cpu_topology *cpu_topology__new(void)
+{
+ struct cpu_topology *tp = NULL;
+ void *addr;
+ u32 nr, i;
+ size_t sz;
+ long ncpus;
+ int ret = -1;
+ struct cpu_map *map;
+
+ ncpus = cpu__max_present_cpu();
+
+ /* build online CPU map */
+ map = cpu_map__new(NULL);
+ if (map == NULL) {
+ pr_debug("failed to get system cpumap\n");
+ return NULL;
+ }
+
+ nr = (u32)(ncpus & UINT_MAX);
+
+ sz = nr * sizeof(char *);
+ addr = calloc(1, sizeof(*tp) + 2 * sz);
+ if (!addr)
+ goto out_free;
+
+ tp = addr;
+ addr += sizeof(*tp);
+ tp->core_siblings = addr;
+ addr += sz;
+ tp->thread_siblings = addr;
+
+ for (i = 0; i < nr; i++) {
+ if (!cpu_map__has(map, i))
+ continue;
+
+ ret = build_cpu_topology(tp, i);
+ if (ret < 0)
+ break;
+ }
+
+out_free:
+ cpu_map__put(map);
+ if (ret) {
+ cpu_topology__delete(tp);
+ tp = NULL;
+ }
+ return tp;
+}
+
+static int load_numa_node(struct numa_topology_node *node, int nr)
+{
+ char str[MAXPATHLEN];
+ char field[32];
+ char *buf = NULL, *p;
+ size_t len = 0;
+ int ret = -1;
+ FILE *fp;
+ u64 mem;
+
+ node->node = (u32) nr;
+
+ scnprintf(str, MAXPATHLEN, NODE_MEMINFO_FMT,
+ sysfs__mountpoint(), nr);
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ while (getline(&buf, &len, fp) > 0) {
+ /* skip over invalid lines */
+ if (!strchr(buf, ':'))
+ continue;
+ if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
+ goto err;
+ if (!strcmp(field, "MemTotal:"))
+ node->mem_total = mem;
+ if (!strcmp(field, "MemFree:"))
+ node->mem_free = mem;
+ if (node->mem_total && node->mem_free)
+ break;
+ }
+
+ fclose(fp);
+ fp = NULL;
+
+ scnprintf(str, MAXPATHLEN, NODE_CPULIST_FMT,
+ sysfs__mountpoint(), nr);
+
+ fp = fopen(str, "r");
+ if (!fp)
+ return -1;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto err;
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ node->cpus = buf;
+ fclose(fp);
+ return 0;
+
+err:
+ free(buf);
+ if (fp)
+ fclose(fp);
+ return ret;
+}
+
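+/* Build the NUMA topology from the sysfs online node list and per-node attributes. */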
+struct numa_topology *numa_topology__new(void)
+{
+ struct cpu_map *node_map = NULL;
+ struct numa_topology *tp = NULL;
+ char path[MAXPATHLEN];
+ char *buf = NULL;
+ size_t len = 0;
+ u32 nr, i;
+ FILE *fp;
+ char *c;
+
+ scnprintf(path, MAXPATHLEN, NODE_ONLINE_FMT,
+ sysfs__mountpoint());
+
+ fp = fopen(path, "r");
+ if (!fp)
+ return NULL;
+
+ if (getline(&buf, &len, fp) <= 0)
+ goto out;
+
+ c = strchr(buf, '\n');
+ if (c)
+ *c = '\0';
+
+ node_map = cpu_map__new(buf);
+ if (!node_map)
+ goto out;
+
+ nr = (u32) node_map->nr;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
+ if (!tp)
+ goto out;
+
+ tp->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ if (load_numa_node(&tp->nodes[i], node_map->map[i])) {
+ numa_topology__delete(tp);
+ tp = NULL;
+ break;
+ }
+ }
+
+out:
+ free(buf);
+ fclose(fp);
+ cpu_map__put(node_map);
+ return tp;
+}
+
+void numa_topology__delete(struct numa_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++)
+ free(tp->nodes[i].cpus);
+
+ free(tp);
+}
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
new file mode 100644
index 000000000000..47a97e71acdf
--- /dev/null
+++ b/tools/perf/util/cputopo.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_CPUTOPO_H
+#define __PERF_CPUTOPO_H
+
+#include <linux/types.h>
+#include "env.h"
+
+struct cpu_topology {
+ u32 core_sib;
+ u32 thread_sib;
+ char **core_siblings;
+ char **thread_siblings;
+};
+
+struct numa_topology_node {
+ char *cpus;
+ u32 node;
+ u64 mem_total;
+ u64 mem_free;
+};
+
+struct numa_topology {
+ u32 nr;
+ struct numa_topology_node nodes[0];
+};
+
+struct cpu_topology *cpu_topology__new(void);
+void cpu_topology__delete(struct cpu_topology *tp);
+
+struct numa_topology *numa_topology__new(void);
+void numa_topology__delete(struct numa_topology *tp);
+
+#endif /* __PERF_CPUTOPO_H */
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
index bc22c39c727f..216cb17a3322 100644
--- a/tools/perf/util/cs-etm-decoder/Build
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -1 +1 @@
-libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
+perf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 8c155575c6c5..39fe21e1cf93 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -290,6 +290,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
decoder->packet_buffer[i].last_instr_size = 0;
+ decoder->packet_buffer[i].last_instr_type = 0;
+ decoder->packet_buffer[i].last_instr_subtype = 0;
+ decoder->packet_buffer[i].last_instr_cond = 0;
+ decoder->packet_buffer[i].flags = 0;
+ decoder->packet_buffer[i].exception_number = UINT32_MAX;
+ decoder->packet_buffer[i].trace_chan_id = UINT8_MAX;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
@@ -300,14 +306,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
enum cs_etm_sample_type sample_type)
{
u32 et = 0;
- struct int_node *inode = NULL;
+ int cpu;
if (decoder->packet_count >= MAX_BUFFER - 1)
return OCSD_RESP_FATAL_SYS_ERR;
- /* Search the RB tree for the cpu associated with this traceID */
- inode = intlist__find(traceid_list, trace_chan_id);
- if (!inode)
+ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
return OCSD_RESP_FATAL_SYS_ERR;
et = decoder->tail;
@@ -317,12 +321,18 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_buffer[et].sample_type = sample_type;
decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+ decoder->packet_buffer[et].cpu = cpu;
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].instr_count = 0;
decoder->packet_buffer[et].last_instr_taken_branch = false;
decoder->packet_buffer[et].last_instr_size = 0;
+ decoder->packet_buffer[et].last_instr_type = 0;
+ decoder->packet_buffer[et].last_instr_subtype = 0;
+ decoder->packet_buffer[et].last_instr_cond = 0;
+ decoder->packet_buffer[et].flags = 0;
+ decoder->packet_buffer[et].exception_number = UINT32_MAX;
+ decoder->packet_buffer[et].trace_chan_id = trace_chan_id;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
@@ -366,6 +376,9 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
packet->instr_count = elem->num_instr_range;
+ packet->last_instr_type = elem->last_i_type;
+ packet->last_instr_subtype = elem->last_i_subtype;
+ packet->last_instr_cond = elem->last_instr_cond;
switch (elem->last_i_type) {
case OCSD_INSTR_BR:
@@ -374,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
break;
case OCSD_INSTR_ISB:
case OCSD_INSTR_DSB_DMB:
+ case OCSD_INSTR_WFI_WFE:
case OCSD_INSTR_OTHER:
default:
packet->last_instr_taken_branch = false;
@@ -395,10 +409,20 @@ cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
-{
- return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
- CS_ETM_EXCEPTION);
+{
+ int ret = 0;
+ struct cs_etm_packet *packet;
+
+ ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION);
+ if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
+ return ret;
+
+ packet = &decoder->packet_buffer[decoder->tail];
+ packet->exception_number = elem->exception_number;
+
+ return ret;
}
static ocsd_datapath_resp_t
@@ -432,7 +456,7 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- resp = cs_etm_decoder__buffer_exception(decoder,
+ resp = cs_etm_decoder__buffer_exception(decoder, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index a6407d41598f..3ab11dfa92ae 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -15,13 +15,6 @@
struct cs_etm_decoder;
-struct cs_etm_buffer {
- const unsigned char *buf;
- size_t len;
- u64 offset;
- u64 ref_timestamp;
-};
-
enum cs_etm_sample_type {
CS_ETM_EMPTY,
CS_ETM_RANGE,
@@ -43,8 +36,14 @@ struct cs_etm_packet {
u64 start_addr;
u64 end_addr;
u32 instr_count;
+ u32 last_instr_type;
+ u32 last_instr_subtype;
+ u32 flags;
+ u32 exception_number;
+ u8 last_instr_cond;
u8 last_instr_taken_branch;
u8 last_instr_size;
+ u8 trace_chan_id;
int cpu;
};
@@ -99,9 +98,10 @@ enum {
CS_ETM_PROTO_PTM,
};
-enum {
+enum cs_etm_decoder_operation {
CS_ETM_OPERATION_PRINT = 1,
CS_ETM_OPERATION_DECODE,
+ CS_ETM_OPERATION_MAX,
};
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 27a374ddf661..de488b43f440 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -12,6 +12,7 @@
#include <linux/log2.h>
#include <linux/types.h>
+#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>
#include "auxtrace.h"
@@ -24,6 +25,7 @@
#include "machine.h"
#include "map.h"
#include "perf.h"
+#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
@@ -63,13 +65,10 @@ struct cs_etm_queue {
struct thread *thread;
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
- const struct cs_etm_state *state;
union perf_event *event_buf;
unsigned int queue_nr;
pid_t pid, tid;
int cpu;
- u64 time;
- u64 timestamp;
u64 offset;
u64 period_instructions;
struct branch_stack *last_branch;
@@ -77,11 +76,13 @@ struct cs_etm_queue {
size_t last_branch_pos;
struct cs_etm_packet *prev_packet;
struct cs_etm_packet *packet;
+ const unsigned char *buf;
+ size_t buf_len, buf_used;
};
static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_);
+ pid_t tid);
/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300
@@ -96,6 +97,34 @@ static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
return CS_ETM_PROTO_ETMV3;
}
+static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *magic = metadata[CS_ETM_MAGIC];
+ return 0;
+}
+
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
+{
+ struct int_node *inode;
+ u64 *metadata;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+ *cpu = (int)metadata[CS_ETM_CPU];
+ return 0;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -109,10 +138,83 @@ static void cs_etm__packet_dump(const char *pkt_string)
fflush(stdout);
}
+static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx,
+ u32 etmidr)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
+ t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+}
+
+static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm, int idx)
+{
+ u64 **metadata = etm->metadata;
+
+ t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
+ t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
+ t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
+ t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
+ t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
+ t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+}
+
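+/* Fill one set of trace parameters per CPU from the recorded metadata. */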
+static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
+ struct cs_etm_auxtrace *etm)
+{
+ int i;
+ u32 etmidr;
+ u64 architecture;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ architecture = etm->metadata[i][CS_ETM_MAGIC];
+
+ switch (architecture) {
+ case __perf_cs_etmv3_magic:
+ etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ break;
+ case __perf_cs_etmv4_magic:
+ cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
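+/* Set up decoder parameters common to the packet printer and full decode paths. */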
+static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_queue *etmq,
+ enum cs_etm_decoder_operation mode)
+{
+ int ret = -EINVAL;
+
+ if (!(mode < CS_ETM_OPERATION_MAX))
+ goto out;
+
+ d_params->packet_printer = cs_etm__packet_dump;
+ d_params->operation = mode;
+ d_params->data = etmq;
+ d_params->formatted = true;
+ d_params->fsyncs = false;
+ d_params->hsyncs = false;
+ d_params->frame_aligned = true;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
struct auxtrace_buffer *buffer)
{
- int i, ret;
+ int ret;
const char *color = PERF_COLOR_BLUE;
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params;
@@ -126,48 +228,22 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+
+ if (!t_params)
+ return;
+
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
/* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_PRINT;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
+ if (cs_etm__init_decoder_params(&d_params, NULL,
+ CS_ETM_OPERATION_PRINT))
+ goto out_free;
decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!decoder)
- return;
+ goto out_free;
do {
size_t consumed;
@@ -182,6 +258,9 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
} while (buffer_used < buffer->size);
cs_etm_decoder__free(decoder);
+
+out_free:
+ zfree(&t_params);
}
static int cs_etm__flush_events(struct perf_session *session,
@@ -205,7 +284,7 @@ static int cs_etm__flush_events(struct perf_session *session,
if (ret < 0)
return ret;
- return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+ return cs_etm__process_timeless_queues(etm, -1);
}
static void cs_etm__free_queue(void *priv)
@@ -251,7 +330,7 @@ static void cs_etm__free(struct perf_session *session)
cs_etm__free_events(session);
session->auxtrace = NULL;
- /* First remove all traceID/CPU# nodes for the RB tree */
+ /* First remove all traceID/metadata nodes from the RB tree */
intlist__for_each_entry_safe(inode, tmp, traceid_list)
intlist__remove(traceid_list, inode);
/* Then the RB tree itself */
@@ -297,7 +376,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
struct addr_location al;
if (!etmq)
- return -1;
+ return 0;
machine = etmq->etm->machine;
cpumode = cs_etm__cpu_mode(etmq, address);
@@ -305,7 +384,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
thread = etmq->thread;
if (!thread) {
if (cpumode != PERF_RECORD_MISC_KERNEL)
- return -EINVAL;
+ return 0;
thread = etmq->etm->unknown_thread;
}
@@ -328,12 +407,10 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
return len;
}
-static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- unsigned int queue_nr)
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
- int i;
struct cs_etm_decoder_params d_params;
- struct cs_etm_trace_params *t_params;
+ struct cs_etm_trace_params *t_params = NULL;
struct cs_etm_queue *etmq;
size_t szp = sizeof(struct cs_etm_packet);
@@ -345,11 +422,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->packet)
goto out_free;
- if (etm->synth_opts.last_branch || etm->sample_branches) {
- etmq->prev_packet = zalloc(szp);
- if (!etmq->prev_packet)
- goto out_free;
- }
+ etmq->prev_packet = zalloc(szp);
+ if (!etmq->prev_packet)
+ goto out_free;
if (etm->synth_opts.last_branch) {
size_t sz = sizeof(struct branch_stack);
@@ -368,59 +443,22 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!etmq->event_buf)
goto out_free;
- etmq->etm = etm;
- etmq->queue_nr = queue_nr;
- etmq->pid = -1;
- etmq->tid = -1;
- etmq->cpu = -1;
-
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
if (!t_params)
goto out_free;
- for (i = 0; i < etm->num_cpu; i++) {
- if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
- u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
-
- t_params[i].protocol =
- cs_etm__get_v7_protocol_version(etmidr);
- t_params[i].etmv3.reg_ctrl =
- etm->metadata[i][CS_ETM_ETMCR];
- t_params[i].etmv3.reg_trc_id =
- etm->metadata[i][CS_ETM_ETMTRACEIDR];
- } else if (etm->metadata[i][CS_ETM_MAGIC] ==
- __perf_cs_etmv4_magic) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 =
- etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 =
- etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 =
- etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 =
- etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
- etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
- etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
- }
- }
+ if (cs_etm__init_trace_params(t_params, etm))
+ goto out_free;
- /* Set decoder parameters to simply print the trace packets */
- d_params.packet_printer = cs_etm__packet_dump;
- d_params.operation = CS_ETM_OPERATION_DECODE;
- d_params.formatted = true;
- d_params.fsyncs = false;
- d_params.hsyncs = false;
- d_params.frame_aligned = true;
- d_params.data = etmq;
+ /* Set decoder parameters to decode trace packets */
+ if (cs_etm__init_decoder_params(&d_params, etmq,
+ CS_ETM_OPERATION_DECODE))
+ goto out_free;
etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
- zfree(&t_params);
-
if (!etmq->decoder)
goto out_free;
@@ -433,14 +471,13 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
cs_etm__mem_access))
goto out_free_decoder;
- etmq->offset = 0;
- etmq->period_instructions = 0;
-
+ zfree(&t_params);
return etmq;
out_free_decoder:
cs_etm_decoder__free(etmq->decoder);
out_free:
+ zfree(&t_params);
zfree(&etmq->event_buf);
zfree(&etmq->last_branch);
zfree(&etmq->last_branch_rb);
@@ -455,24 +492,30 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr)
{
+ int ret = 0;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
- return 0;
+ goto out;
- etmq = cs_etm__alloc_queue(etm, queue_nr);
+ etmq = cs_etm__alloc_queue(etm);
- if (!etmq)
- return -ENOMEM;
+ if (!etmq) {
+ ret = -ENOMEM;
+ goto out;
+ }
queue->priv = etmq;
-
- if (queue->cpu != -1)
- etmq->cpu = queue->cpu;
-
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->cpu = queue->cpu;
etmq->tid = queue->tid;
+ etmq->pid = -1;
+ etmq->offset = 0;
+ etmq->period_instructions = 0;
- return 0;
+out:
+ return ret;
}
static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
@@ -480,6 +523,9 @@ static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
unsigned int i;
int ret;
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
for (i = 0; i < etm->queues.nr_queues; i++) {
ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
if (ret)
@@ -637,7 +683,7 @@ static int cs_etm__inject_event(union perf_event *event,
static int
-cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+cs_etm__get_trace(struct cs_etm_queue *etmq)
{
struct auxtrace_buffer *aux_buffer = etmq->buffer;
struct auxtrace_buffer *old_buffer = aux_buffer;
@@ -651,7 +697,7 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (!aux_buffer) {
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->len = 0;
+ etmq->buf_len = 0;
return 0;
}
@@ -671,13 +717,11 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
if (old_buffer)
auxtrace_buffer__drop_data(old_buffer);
- buff->offset = aux_buffer->offset;
- buff->len = aux_buffer->size;
- buff->buf = aux_buffer->data;
-
- buff->ref_timestamp = aux_buffer->reference;
+ etmq->buf_used = 0;
+ etmq->buf_len = aux_buffer->size;
+ etmq->buf = aux_buffer->data;
- return buff->len;
+ return etmq->buf_len;
}
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
@@ -719,7 +763,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
sample.stream_id = etmq->etm->instructions_id;
sample.period = period;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.insn_len = 1;
sample.cpumode = event->sample.header.misc;
@@ -778,7 +822,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
sample.stream_id = etmq->etm->branches_id;
sample.period = 1;
sample.cpu = etmq->packet->cpu;
- sample.flags = 0;
+ sample.flags = etmq->prev_packet->flags;
sample.cpumode = event->sample.header.misc;
/*
@@ -935,7 +979,6 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
* PREV_PACKET is a branch.
*/
if (etm->synth_opts.last_branch &&
- etmq->prev_packet &&
etmq->prev_packet->sample_type == CS_ETM_RANGE &&
etmq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq);
@@ -968,7 +1011,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
etmq->period_instructions = instrs_over;
}
- if (etm->sample_branches && etmq->prev_packet) {
+ if (etm->sample_branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
@@ -1025,9 +1068,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet *tmp;
- if (!etmq->prev_packet)
- return 0;
-
/* Handle start tracing packet */
if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
goto swap_packet;
@@ -1106,95 +1146,489 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq)
return 0;
}
+/*
+ * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
+ * if need be.
+ * Returns: < 0 if error
+ * = 0 if no more auxtrace_buffer to read
+ * > 0 if the current buffer isn't empty yet
+ */
+static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ if (!etmq->buf_len) {
+ ret = cs_etm__get_trace(etmq);
+ if (ret <= 0)
+ return ret;
+ /*
+ * We cannot assume consecutive blocks in the data file
+ * are contiguous, reset the decoder to force re-sync.
+ */
+ ret = cs_etm_decoder__reset(etmq->decoder);
+ if (ret)
+ return ret;
+ }
+
+ return etmq->buf_len;
+}
+
+static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet,
+ u64 end_addr)
+{
+ u16 instr16;
+ u32 instr32;
+ u64 addr;
+
+ switch (packet->isa) {
+ case CS_ETM_ISA_T32:
+ /*
+ * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'15 b'8
+ * +-----------------+--------+
+ * | 1 1 0 1 1 1 1 1 | imm8 |
+ * +-----------------+--------+
+ *
+ * According to the specification, SVC is only defined for T32
+ * as a 16-bit instruction with no 32-bit encoding, so only
+ * read 2 bytes as the instruction size for T32.
+ */
+ addr = end_addr - 2;
+ cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
+ if ((instr16 & 0xFF00) == 0xDF00)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A32:
+ /*
+ * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
+ *
+ * b'31 b'28 b'27 b'24
+ * +---------+---------+-------------------------+
+ * | !1111 | 1 1 1 1 | imm24 |
+ * +---------+---------+-------------------------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0x0F000000) == 0x0F000000 &&
+ (instr32 & 0xF0000000) != 0xF0000000)
+ return true;
+
+ break;
+ case CS_ETM_ISA_A64:
+ /*
+ * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
+ *
+ * b'31 b'21 b'4 b'0
+ * +-----------------------+---------+-----------+
+ * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
+ * +-----------------------+---------+-----------+
+ */
+ addr = end_addr - 4;
+ cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
+ if ((instr32 & 0xFFE0001F) == 0xd4000001)
+ return true;
+
+ break;
+ case CS_ETM_ISA_UNKNOWN:
+ default:
+ break;
+ }
+
+ return false;
+}
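As an aside (not part of the patch), here is a minimal standalone sketch of the A64 mask check used in the CS_ETM_ISA_A64 case above; the encodings in the test are illustrative values, and the helper name is made up for this example:

#include <stdint.h>
#include <stdio.h>

/* Same mask/value pair as the CS_ETM_ISA_A64 case above. */
static int is_a64_svc(uint32_t instr)
{
	return (instr & 0xFFE0001F) == 0xd4000001;
}

int main(void)
{
	/* 0xd4000001 encodes "svc #0", 0xd4000021 encodes "svc #1". */
	printf("%d %d %d\n", is_a64_svc(0xd4000001),
	       is_a64_svc(0xd4000021),
	       is_a64_svc(0xd503201f) /* nop, should not match */);
	return 0;
}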
+
+static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SVC)
+ return true;
+
+ /*
+ * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
+ * HVC; we need to check whether it's an SVC instruction based
+ * on the packet address.
+ */
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+ }
+
+ return false;
+}
+
+static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
+ packet->exception_number == CS_ETMV3_EXC_IRQ ||
+ packet->exception_number == CS_ETMV3_EXC_FIQ)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic)
+ if (packet->exception_number == CS_ETMV4_EXC_RESET ||
+ packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
+ packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
+ packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
+ packet->exception_number == CS_ETMV4_EXC_IRQ ||
+ packet->exception_number == CS_ETMV4_EXC_FIQ)
+ return true;
+
+ return false;
+}
+
+static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+
+ if (magic == __perf_cs_etmv3_magic)
+ if (packet->exception_number == CS_ETMV3_EXC_SMC ||
+ packet->exception_number == CS_ETMV3_EXC_HYP ||
+ packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
+ packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
+ packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
+ packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
+ packet->exception_number == CS_ETMV3_EXC_GENERIC)
+ return true;
+
+ if (magic == __perf_cs_etmv4_magic) {
+ if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
+ packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
+ packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
+ packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
+ return true;
+
+ /*
+ * For CS_ETMV4_EXC_CALL, instructions other than SVC
+ * (i.e. SMC and HVC) are taken as sync exceptions.
+ */
+ if (packet->exception_number == CS_ETMV4_EXC_CALL &&
+ !cs_etm__is_svc_instr(etmq, prev_packet,
+ prev_packet->end_addr))
+ return true;
+
+ /*
+ * ETMv4 has 5 bits for the exception number; if a number
+ * is in the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END]
+ * it is an implementation defined exception.
+ *
+ * In this case, simply treat it as a sync exception.
+ */
+ if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
+ packet->exception_number <= CS_ETMV4_EXC_END)
+ return true;
+ }
+
+ return false;
+}
+
+static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_packet *packet = etmq->packet;
+ struct cs_etm_packet *prev_packet = etmq->prev_packet;
+ u64 magic;
+ int ret;
+
+ switch (packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * Immediate branch instruction with neither link nor
+ * return flag; it's a normal branch instruction within
+ * the function.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
+ packet->flags = PERF_IP_FLAG_BRANCH;
+
+ if (packet->last_instr_cond)
+ packet->flags |= PERF_IP_FLAG_CONDITIONAL;
+ }
+
+ /*
+ * Immediate branch instruction with link (e.g. BL), this is
+ * a branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+ * Indirect branch instruction with link (e.g. BLR), this is
+ * a branch instruction for a function call.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL;
+
+ /*
+ * Indirect branch instruction with subtype
+ * OCSD_S_INSTR_V7_IMPLIED_RET; this is an explicit hint of
+ * a function return for A32/T32.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+ * Indirect branch instruction without link (e.g. BR); usually
+ * this is used for a function return, especially for functions
+ * in dynamically linked libraries.
+ */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_NONE)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /* Return instruction for function return. */
+ if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
+ packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN;
+
+ /*
+ * The decoder might insert a discontinuity in the middle of
+ * instruction packets; fix up prev_packet with the flag
+ * PERF_IP_FLAG_TRACE_BEGIN to indicate the trace is restarting.
+ */
+ if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_BEGIN;
+
+ /*
+ * If the previous packet is an exception return packet
+ * and the return address just follows an SVC instruction,
+ * calibrate the previous packet's sample flags to
+ * PERF_IP_FLAG_SYSCALLRET.
+ */
+ if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT) &&
+ cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_SYSCALLRET;
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+ * The trace is discontinuous; if the previous packet is an
+ * instruction packet, set the flag PERF_IP_FLAG_TRACE_END
+ * on the previous packet.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags |= PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_END;
+ break;
+ case CS_ETM_EXCEPTION:
+ ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
+ if (ret)
+ return ret;
+
+ /* The exception is for a system call. */
+ if (cs_etm__is_syscall(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_SYSCALLRET;
+ /*
+ * The exceptions are triggered by external signals from bus,
+ * interrupt controller, debug module, PE reset or halt.
+ */
+ else if (cs_etm__is_async_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
+ PERF_IP_FLAG_INTERRUPT;
+ /*
+ * Otherwise, the exception is caused by a trap, an instruction
+ * or data fault, or an alignment error.
+ */
+ else if (cs_etm__is_sync_exception(etmq, magic))
+ packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_INTERRUPT;
+
+ /*
+ * When an exception packet is inserted, it is not used
+ * standalone for generating samples but is affiliated with
+ * the previous instruction range packet; so set the previous
+ * range packet's flags to tell perf it is an exception taken
+ * branch.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = packet->flags;
+ break;
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * When an exception return packet is inserted, it is not used
+ * standalone for generating samples but is affiliated with the
+ * previous instruction range packet; so set the previous range
+ * packet's flags to tell perf it is an exception return branch.
+ *
+ * The exception return can be for either a system call or
+ * another exception type; unfortunately the packet doesn't
+ * contain exception type related info, so we cannot decide
+ * the type purely from the exception return packet. Recording
+ * the exception number from the exception packet and reusing
+ * it for the exception return packet is not reliable either,
+ * because the trace can be discontinuous or the interrupt can
+ * be nested; in those two cases the recorded exception number
+ * cannot be used for the exception return packet.
+ *
+ * For the exception return packet, we only need to distinguish
+ * whether it is for a system call or another type. The decision
+ * can therefore be deferred until we receive the next packet,
+ * which contains the return address; based on the return address
+ * we can read out the previous instruction, check whether it is
+ * a system call instruction, and calibrate the sample flag as
+ * needed.
+ */
+ if (prev_packet->sample_type == CS_ETM_RANGE)
+ prev_packet->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_RETURN |
+ PERF_IP_FLAG_INTERRUPT;
+ break;
+ case CS_ETM_EMPTY:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
+{
+ int ret = 0;
+ size_t processed = 0;
+
+ /*
+ * Packets are decoded and added to the decoder's packet queue
+ * until the decoder packet processing callback has requested that
+ * processing stops or there is nothing left in the buffer. Normal
+ * operations that stop processing are a timestamp packet or a full
+ * decoder buffer queue.
+ */
+ ret = cs_etm_decoder__process_data_block(etmq->decoder,
+ etmq->offset,
+ &etmq->buf[etmq->buf_used],
+ etmq->buf_len,
+ &processed);
+ if (ret)
+ goto out;
+
+ etmq->offset += processed;
+ etmq->buf_used += processed;
+ etmq->buf_len -= processed;
+
+out:
+ return ret;
+}
+
+static int cs_etm__process_decoder_queue(struct cs_etm_queue *etmq)
+{
+ int ret;
+
+ /* Process each packet in this chunk */
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder,
+ etmq->packet);
+ if (ret <= 0)
+ /*
+ * Stop processing this chunk on
+ * end of data or error
+ */
+ break;
+
+ /*
+ * Packet addresses are swapped by the packet
+ * handling within the switch() statement below,
+ * so the sample flags must be set before the
+ * switch() statement in order to use the address
+ * information prior to the swap.
+ */
+ ret = cs_etm__set_sample_flags(etmq);
+ if (ret < 0)
+ break;
+
+ switch (etmq->packet->sample_type) {
+ case CS_ETM_RANGE:
+ /*
+ * If the packet contains an instruction
+ * range, generate instruction sequence
+ * events.
+ */
+ cs_etm__sample(etmq);
+ break;
+ case CS_ETM_EXCEPTION:
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * If an exception packet arrives, make
+ * sure the previous instruction range
+ * packet is handled properly.
+ */
+ cs_etm__exception(etmq);
+ break;
+ case CS_ETM_DISCONTINUITY:
+ /*
+ * Discontinuity in trace, flush
+ * previous branch stack
+ */
+ cs_etm__flush(etmq);
+ break;
+ case CS_ETM_EMPTY:
+ /*
+ * Should not receive empty packet,
+ * report error.
+ */
+ pr_err("CS ETM Trace: empty packet\n");
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
- struct cs_etm_auxtrace *etm = etmq->etm;
- struct cs_etm_buffer buffer;
- size_t buffer_used, processed;
int err = 0;
- if (!etm->kernel_start)
- etm->kernel_start = machine__kernel_start(etm->machine);
-
/* Go through each buffer in the queue and decode them one by one */
while (1) {
- buffer_used = 0;
- memset(&buffer, 0, sizeof(buffer));
- err = cs_etm__get_trace(&buffer, etmq);
+ err = cs_etm__get_data_block(etmq);
if (err <= 0)
return err;
- /*
- * We cannot assume consecutive blocks in the data file are
- * contiguous, reset the decoder to force re-sync.
- */
- err = cs_etm_decoder__reset(etmq->decoder);
- if (err != 0)
- return err;
/* Run trace decoder until buffer consumed or end of trace */
do {
- processed = 0;
- err = cs_etm_decoder__process_data_block(
- etmq->decoder,
- etmq->offset,
- &buffer.buf[buffer_used],
- buffer.len - buffer_used,
- &processed);
+ err = cs_etm__decode_data_block(etmq);
if (err)
return err;
- etmq->offset += processed;
- buffer_used += processed;
-
- /* Process each packet in this chunk */
- while (1) {
- err = cs_etm_decoder__get_packet(etmq->decoder,
- etmq->packet);
- if (err <= 0)
- /*
- * Stop processing this chunk on
- * end of data or error
- */
- break;
-
- switch (etmq->packet->sample_type) {
- case CS_ETM_RANGE:
- /*
- * If the packet contains an instruction
- * range, generate instruction sequence
- * events.
- */
- cs_etm__sample(etmq);
- break;
- case CS_ETM_EXCEPTION:
- case CS_ETM_EXCEPTION_RET:
- /*
- * If the exception packet is coming,
- * make sure the previous instruction
- * range packet to be handled properly.
- */
- cs_etm__exception(etmq);
- break;
- case CS_ETM_DISCONTINUITY:
- /*
- * Discontinuity in trace, flush
- * previous branch stack
- */
- cs_etm__flush(etmq);
- break;
- case CS_ETM_EMPTY:
- /*
- * Should not receive empty packet,
- * report error.
- */
- pr_err("CS ETM Trace: empty packet\n");
- return -EINVAL;
- default:
- break;
- }
- }
- } while (buffer.len > buffer_used);
+ /*
+ * Process each packet in this chunk; if an error occurs
+ * there is nothing to do other than hope the next one
+ * will be better.
+ */
+ err = cs_etm__process_decoder_queue(etmq);
+
+ } while (etmq->buf_len);
if (err == 0)
/* Flush any remaining branch stack entries */
@@ -1205,7 +1639,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
}
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
- pid_t tid, u64 time_)
+ pid_t tid)
{
unsigned int i;
struct auxtrace_queues *queues = &etm->queues;
@@ -1215,7 +1649,6 @@ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
struct cs_etm_queue *etmq = queue->priv;
if (etmq && ((tid == -1) || (etmq->tid == tid))) {
- etmq->time = time_;
cs_etm__set_pid_tid_cpu(etm, queue);
cs_etm__run_decoder(etmq);
}
@@ -1259,8 +1692,7 @@ static int cs_etm__process_event(struct perf_session *session,
if (event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
- event->fork.tid,
- sample->time);
+ event->fork.tid);
return 0;
}
@@ -1414,9 +1846,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
0xffffffff);
/*
- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
- * to be made for each packet that gets decoded, optimizing access in
- * anything other than a sequential array is worth doing.
+ * Create an RB tree for traceID-metadata tuple. Since the conversion
+ * has to be made for each packet that gets decoded, optimizing access
+ * in anything other than a sequential array is worth doing.
*/
traceid_list = intlist__new(NULL);
if (!traceid_list) {
@@ -1482,8 +1914,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
err = -EINVAL;
goto err_free_metadata;
}
- /* All good, associate the traceID with the CPU# */
- inode->priv = &metadata[j][CS_ETM_CPU];
+ /* All good, associate the traceID with the metadata pointer */
+ inode->priv = metadata[j];
}
/*
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 37f8d48179ca..0e97c196147a 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -53,7 +53,51 @@ enum {
CS_ETMV4_PRIV_MAX,
};
-/* RB tree for quick conversion between traceID and CPUs */
+/*
+ * ETMv3 exception encoding number:
+ * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
+ * table 7-12 Encoding of Exception[3:0] for non-ARMv7-M processors.
+ */
+enum {
+ CS_ETMV3_EXC_NONE = 0,
+ CS_ETMV3_EXC_DEBUG_HALT = 1,
+ CS_ETMV3_EXC_SMC = 2,
+ CS_ETMV3_EXC_HYP = 3,
+ CS_ETMV3_EXC_ASYNC_DATA_ABORT = 4,
+ CS_ETMV3_EXC_JAZELLE_THUMBEE = 5,
+ CS_ETMV3_EXC_PE_RESET = 8,
+ CS_ETMV3_EXC_UNDEFINED_INSTR = 9,
+ CS_ETMV3_EXC_SVC = 10,
+ CS_ETMV3_EXC_PREFETCH_ABORT = 11,
+ CS_ETMV3_EXC_DATA_FAULT = 12,
+ CS_ETMV3_EXC_GENERIC = 13,
+ CS_ETMV3_EXC_IRQ = 14,
+ CS_ETMV3_EXC_FIQ = 15,
+};
+
+/*
+ * ETMv4 exception encoding number:
+ * See ARM Embedded Trace Macrocell Architecture Specification (ARM IHI 0064D)
+ * table 6-12 Possible values for the TYPE field in an Exception instruction
+ * trace packet, for ARMv7-A/R and ARMv8-A/R PEs.
+ */
+enum {
+ CS_ETMV4_EXC_RESET = 0,
+ CS_ETMV4_EXC_DEBUG_HALT = 1,
+ CS_ETMV4_EXC_CALL = 2,
+ CS_ETMV4_EXC_TRAP = 3,
+ CS_ETMV4_EXC_SYSTEM_ERROR = 4,
+ CS_ETMV4_EXC_INST_DEBUG = 6,
+ CS_ETMV4_EXC_DATA_DEBUG = 7,
+ CS_ETMV4_EXC_ALIGNMENT = 10,
+ CS_ETMV4_EXC_INST_FAULT = 11,
+ CS_ETMV4_EXC_DATA_FAULT = 12,
+ CS_ETMV4_EXC_IRQ = 14,
+ CS_ETMV4_EXC_FIQ = 15,
+ CS_ETMV4_EXC_END = 31,
+};
+
+/* RB tree for quick conversion between traceID and metadata pointers */
struct intlist *traceid_list;
#define KiB(x) ((x) * 1024)
@@ -61,14 +105,15 @@ struct intlist *traceid_list;
#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
-static const u64 __perf_cs_etmv3_magic = 0x3030303030303030ULL;
-static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
+#define __perf_cs_etmv3_magic 0x3030303030303030ULL
+#define __perf_cs_etmv4_magic 0x4040404040404040ULL
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
#ifdef HAVE_CSTRACE_SUPPORT
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
+int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
#else
static inline int
cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
@@ -76,6 +121,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
{
return -1;
}
+
+static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
+ int *cpu __maybe_unused)
+{
+ return -1;
+}
#endif
#endif
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2a36fab76994..b79e1d6839ed 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CTF writing support via babeltrace.
*
* Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
* Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <errno.h>
@@ -271,7 +270,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
if (i > 0)
strncpy(buffer, string, i);
}
- strncat(buffer + p, numstr, 4);
+ memcpy(buffer + p, numstr, 4);
p += 3;
}
}
@@ -310,7 +309,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
if (flags & TEP_FIELD_IS_DYNAMIC) {
unsigned long long tmp_val;
- tmp_val = tep_read_number(fmtf->event->pevent,
+ tmp_val = tep_read_number(fmtf->event->tep,
data + offset, len);
offset = tmp_val;
len = offset >> 16;
@@ -354,7 +353,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
unsigned long long value_int;
value_int = tep_read_number(
- fmtf->event->pevent,
+ fmtf->event->tep,
data + offset + i * len, len);
if (!(flags & TEP_FIELD_IS_SIGNED))
@@ -1578,7 +1577,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
{
struct perf_session *session;
struct perf_data data = {
- .file = { .path = input, .fd = -1 },
+ .path = input,
.mode = PERF_DATA_MODE_READ,
.force = opts->force,
};
@@ -1650,7 +1649,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
- data.file.path, path);
+ data.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index d8cfc19ddb10..6a64f713710d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -7,10 +7,148 @@
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
+#include <asm/bug.h>
+#include <sys/types.h>
+#include <dirent.h>
#include "data.h"
#include "util.h"
#include "debug.h"
+#include "header.h"
+
+static void close_dir(struct perf_data_file *files, int nr)
+{
+ while (--nr >= 0) {
+ close(files[nr].fd);
+ free(files[nr].path);
+ }
+ free(files);
+}
+
+void perf_data__close_dir(struct perf_data *data)
+{
+ close_dir(data->dir.files, data->dir.nr);
+}
+
+int perf_data__create_dir(struct perf_data *data, int nr)
+{
+ struct perf_data_file *files = NULL;
+ int i, ret = -1;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ files = zalloc(nr * sizeof(*files));
+ if (!files)
+ return -ENOMEM;
+
+ data->dir.version = PERF_DIR_VERSION;
+ data->dir.files = files;
+ data->dir.nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ struct perf_data_file *file = &files[i];
+
+ if (asprintf(&file->path, "%s/data.%d", data->path, i) < 0)
+ goto out_err;
+
+ ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ }
+
+ return 0;
+
+out_err:
+ close_dir(files, i);
+ return ret;
+}
+
+int perf_data__open_dir(struct perf_data *data)
+{
+ struct perf_data_file *files = NULL;
+ struct dirent *dent;
+ int ret = -1;
+ DIR *dir;
+ int nr = 0;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ /* The version is provided by DIR_FORMAT feature. */
+ if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
+ return -1;
+
+ dir = opendir(data->path);
+ if (!dir)
+ return -EINVAL;
+
+ while ((dent = readdir(dir)) != NULL) {
+ struct perf_data_file *file;
+ char path[PATH_MAX];
+ struct stat st;
+
+ snprintf(path, sizeof(path), "%s/%s", data->path, dent->d_name);
+ if (stat(path, &st))
+ continue;
+
+ if (!S_ISREG(st.st_mode) || strncmp(dent->d_name, "data", 4))
+ continue;
+
+ ret = -ENOMEM;
+
+ file = realloc(files, (nr + 1) * sizeof(*files));
+ if (!file)
+ goto out_err;
+
+ files = file;
+ file = &files[nr++];
+
+ file->path = strdup(path);
+ if (!file->path)
+ goto out_err;
+
+ ret = open(file->path, O_RDONLY);
+ if (ret < 0)
+ goto out_err;
+
+ file->fd = ret;
+ file->size = st.st_size;
+ }
+
+ if (!files)
+ return -EINVAL;
+
+ data->dir.files = files;
+ data->dir.nr = nr;
+ return 0;
+
+out_err:
+ close_dir(files, nr);
+ return ret;
+}
+
+int perf_data__update_dir(struct perf_data *data)
+{
+ int i;
+
+ if (WARN_ON(!data->is_dir))
+ return -EINVAL;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+ struct stat st;
+
+ if (fstat(file->fd, &st))
+ return -1;
+
+ file->size = st.st_size;
+ }
+
+ return 0;
+}
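For illustration (not part of the patch), a sketch of how a writer might use the new directory helpers; the function name and the mmap count are hypothetical, and error handling is trimmed:

/*
 * Hypothetical writer-side usage of the directory helpers added above:
 * create one data.<i> file per mmap, let the record loop write into
 * data->dir.files[i].fd, then refresh the per-file sizes and close.
 */
static int example_record_dir(struct perf_data *data, int nr_mmaps)
{
	if (perf_data__create_dir(data, nr_mmaps))
		return -1;

	/* ... write events into data->dir.files[i].fd here ... */

	if (perf_data__update_dir(data))
		return -1;

	perf_data__close_dir(data);
	return 0;
}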
static bool check_pipe(struct perf_data *data)
{
@@ -19,11 +157,11 @@ static bool check_pipe(struct perf_data *data)
int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
- if (!data->file.path) {
+ if (!data->path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
- if (!strcmp(data->file.path, "-"))
+ if (!strcmp(data->path, "-"))
is_pipe = true;
}
@@ -37,18 +175,46 @@ static int check_backup(struct perf_data *data)
{
struct stat st;
- if (!stat(data->file.path, &st) && st.st_size) {
- /* TODO check errors properly */
+ if (perf_data__is_read(data))
+ return 0;
+
+ if (!stat(data->path, &st) && st.st_size) {
char oldname[PATH_MAX];
+ int ret;
+
snprintf(oldname, sizeof(oldname), "%s.old",
- data->file.path);
- unlink(oldname);
- rename(data->file.path, oldname);
+ data->path);
+
+ ret = rm_rf_perf_data(oldname);
+ if (ret) {
+ pr_err("Can't remove old data: %s (%s)\n",
+ ret == -2 ?
+ "Unknown file found" : strerror(errno),
+ oldname);
+ return -1;
+ }
+
+ if (rename(data->path, oldname)) {
+ pr_err("Can't move data: %s (%s to %s)\n",
+ strerror(errno),
+ data->path, oldname);
+ return -1;
+ }
}
return 0;
}
+static bool is_dir(struct perf_data *data)
+{
+ struct stat st;
+
+ if (stat(data->path, &st))
+ return false;
+
+ return (st.st_mode & S_IFMT) == S_IFDIR;
+}
+
static int open_file_read(struct perf_data *data)
{
struct stat st;
@@ -82,7 +248,7 @@ static int open_file_read(struct perf_data *data)
goto out_close;
}
- data->size = st.st_size;
+ data->file.size = st.st_size;
return fd;
out_close:
@@ -95,9 +261,6 @@ static int open_file_write(struct perf_data *data)
int fd;
char sbuf[STRERR_BUFSIZE];
- if (check_backup(data))
- return -1;
-
fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
@@ -115,8 +278,46 @@ static int open_file(struct perf_data *data)
fd = perf_data__is_read(data) ?
open_file_read(data) : open_file_write(data);
+ if (fd < 0) {
+ zfree(&data->file.path);
+ return -1;
+ }
+
data->file.fd = fd;
- return fd < 0 ? -1 : 0;
+ return 0;
+}
+
+static int open_file_dup(struct perf_data *data)
+{
+ data->file.path = strdup(data->path);
+ if (!data->file.path)
+ return -ENOMEM;
+
+ return open_file(data);
+}
+
+static int open_dir(struct perf_data *data)
+{
+ int ret;
+
+ /*
+ * So far we open only the header, so we can read the data version and
+ * layout.
+ */
+ if (asprintf(&data->file.path, "%s/header", data->path) < 0)
+ return -1;
+
+ if (perf_data__is_write(data) &&
+ mkdir(data->path, S_IRWXU) < 0)
+ return -1;
+
+ ret = open_file(data);
+
+ /* Cleanup whatever we managed to create so far. */
+ if (ret && perf_data__is_write(data))
+ rm_rf_perf_data(data->path);
+
+ return ret;
}
int perf_data__open(struct perf_data *data)
@@ -124,14 +325,25 @@ int perf_data__open(struct perf_data *data)
if (check_pipe(data))
return 0;
- if (!data->file.path)
- data->file.path = "perf.data";
+ if (!data->path)
+ data->path = "perf.data";
- return open_file(data);
+ if (check_backup(data))
+ return -1;
+
+ if (perf_data__is_read(data))
+ data->is_dir = is_dir(data);
+
+ return perf_data__is_dir(data) ?
+ open_dir(data) : open_file_dup(data);
}
void perf_data__close(struct perf_data *data)
{
+ if (perf_data__is_dir(data))
+ perf_data__close_dir(data);
+
+ zfree(&data->file.path);
close(data->file.fd);
}
@@ -149,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit)
+ size_t pos, bool at_exit,
+ char **new_filepath)
{
- char *new_filepath;
int ret;
if (check_pipe(data))
@@ -159,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
+ if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(data->file.path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
+ if (rename(data->path, *new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
if (!at_exit) {
close(data->file.fd);
@@ -184,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
}
ret = data->file.fd;
out:
- free(new_filepath);
return ret;
}
+
+unsigned long perf_data__size(struct perf_data *data)
+{
+ u64 size = data->file.size;
+ int i;
+
+ if (!data->is_dir)
+ return size;
+
+ for (i = 0; i < data->dir.nr; i++) {
+ struct perf_data_file *file = &data->dir.files[i];
+
+ size += file->size;
+ }
+
+ return size;
+}
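A reader-side sketch, again not part of the patch and with an illustrative function name: after this change the same perf_data__open() call handles both a plain file and a directory, and perf_data__size() sums every data file once the directory has been fully opened.

/* Illustrative only; assumes perf's util headers (pr_debug etc.). */
static int example_report_open(const char *path)
{
	struct perf_data data = {
		.path = path,
		.mode = PERF_DATA_MODE_READ,
	};

	if (perf_data__open(&data))
		return -1;

	/*
	 * For a directory input only the header file is open here; the
	 * data.N files get opened by perf_data__open_dir() once the
	 * DIR_FORMAT header feature has set data.dir.version.
	 */
	pr_debug("%s is %sa directory, header size %lu\n", data.path,
		 perf_data__is_dir(&data) ? "" : "not ",
		 perf_data__size(&data));

	perf_data__close(&data);
	return 0;
}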
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 4828f7feea89..259868a39019 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,16 +10,24 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
+ char *path;
int fd;
+ unsigned long size;
};
struct perf_data {
+ const char *path;
struct perf_data_file file;
bool is_pipe;
+ bool is_dir;
bool force;
- unsigned long size;
enum perf_data_mode mode;
+
+ struct {
+ u64 version;
+ struct perf_data_file *files;
+ int nr;
+ } dir;
};
static inline bool perf_data__is_read(struct perf_data *data)
@@ -37,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
return data->is_pipe;
}
-static inline int perf_data__fd(struct perf_data *data)
+static inline bool perf_data__is_dir(struct perf_data *data)
{
- return data->file.fd;
+ return data->is_dir;
}
-static inline unsigned long perf_data__size(struct perf_data *data)
+static inline int perf_data__fd(struct perf_data *data)
{
- return data->size;
+ return data->file.fd;
}
int perf_data__open(struct perf_data *data);
@@ -62,5 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
*/
int perf_data__switch(struct perf_data *data,
const char *postfix,
- size_t pos, bool at_exit);
+ size_t pos, bool at_exit, char **new_filepath);
+
+int perf_data__create_dir(struct perf_data *data, int nr);
+int perf_data__open_dir(struct perf_data *data);
+void perf_data__close_dir(struct perf_data *data);
+int perf_data__update_dir(struct perf_data *data);
+unsigned long perf_data__size(struct perf_data *data);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 69fbb0a72d0c..2182f552aac6 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* db-export.c: Support for exporting data suitable for import to a database
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <errno.h>
@@ -20,6 +11,7 @@
#include "thread.h"
#include "comm.h"
#include "symbol.h"
+#include "map.h"
#include "event.h"
#include "util.h"
#include "thread-stack.h"
@@ -509,18 +501,23 @@ int db_export__call_path(struct db_export *dbe, struct call_path *cp)
return 0;
}
-int db_export__call_return(struct db_export *dbe, struct call_return *cr)
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id)
{
int err;
- if (cr->db_id)
- return 0;
-
err = db_export__call_path(dbe, cr->cp);
if (err)
return err;
- cr->db_id = ++dbe->call_return_last_db_id;
+ if (!cr->db_id)
+ cr->db_id = ++dbe->call_return_last_db_id;
+
+ if (parent_db_id) {
+ if (!*parent_db_id)
+ *parent_db_id = ++dbe->call_return_last_db_id;
+ cr->parent_db_id = *parent_db_id;
+ }
if (dbe->export_call_return)
return dbe->export_call_return(dbe, cr);
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 67bc6b8ad2d6..e8a64028a386 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* db-export.h: Support for exporting data suitable for import to a database
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef __PERF_DB_EXPORT_H
@@ -104,6 +95,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
int db_export__branch_types(struct db_export *dbe);
int db_export__call_path(struct db_export *dbe, struct call_path *cp);
-int db_export__call_return(struct db_export *dbe, struct call_return *cr);
+int db_export__call_return(struct db_export *dbe, struct call_return *cr,
+ u64 *parent_db_id);
#endif
diff --git a/tools/perf/util/drv_configs.c b/tools/perf/util/drv_configs.c
deleted file mode 100644
index eec754243f4d..000000000000
--- a/tools/perf/util/drv_configs.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-#include "pmu.h"
-#include <errno.h>
-
-static int
-perf_evsel__apply_drv_configs(struct perf_evsel *evsel,
- struct perf_evsel_config_term **err_term)
-{
- bool found = false;
- int err = 0;
- struct perf_evsel_config_term *term;
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmu__scan(pmu)) != NULL)
- if (pmu->type == evsel->attr.type) {
- found = true;
- break;
- }
-
- list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
- continue;
-
- /*
- * We have a configuration term, report an error if we
- * can't find the PMU or if the PMU driver doesn't support
- * cmd line driver configuration.
- */
- if (!found || !pmu->set_drv_config) {
- err = -EINVAL;
- *err_term = term;
- break;
- }
-
- err = pmu->set_drv_config(term);
- if (err) {
- *err_term = term;
- break;
- }
- }
-
- return err;
-}
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **err_term)
-{
- struct perf_evsel *evsel;
- int err = 0;
-
- evlist__for_each_entry(evlist, evsel) {
- err = perf_evsel__apply_drv_configs(evsel, err_term);
- if (err) {
- *err_evsel = evsel;
- break;
- }
- }
-
- return err;
-}
diff --git a/tools/perf/util/drv_configs.h b/tools/perf/util/drv_configs.h
deleted file mode 100644
index 32bc9babc2e0..000000000000
--- a/tools/perf/util/drv_configs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * drv_configs.h: Interface to apply PMU specific configuration
- * Copyright (c) 2016-2018, Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#ifndef __PERF_DRV_CONFIGS_H
-#define __PERF_DRV_CONFIGS_H
-
-#include "drv_configs.h"
-#include "evlist.h"
-#include "evsel.h"
-
-int perf_evlist__apply_drv_configs(struct perf_evlist *evlist,
- struct perf_evsel **err_evsel,
- struct perf_evsel_config_term **term);
-#endif
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 62c8cf622607..e059976d9d93 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -8,8 +8,11 @@
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
+#include <libgen.h>
#include "compress.h"
+#include "namespaces.h"
#include "path.h"
+#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
@@ -181,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
break;
@@ -1138,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
static void dso__set_basename(struct dso *dso)
{
- /*
- * basename() may modify path buffer, so we must pass
- * a copy.
- */
- char *base, *lname = strdup(dso->long_name);
+ char *base, *lname;
+ int tid;
- if (!lname)
- return;
-
- /*
- * basename() may return a pointer to internal
- * storage which is reused in subsequent calls
- * so copy the result.
- */
- base = strdup(basename(lname));
+ if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+ if (asprintf(&base, "[JIT] tid %d", tid) < 0)
+ return;
+ } else {
+ /*
+ * basename() may modify path buffer, so we must pass
+ * a copy.
+ */
+ lname = strdup(dso->long_name);
+ if (!lname)
+ return;
- free(lname);
+ /*
+ * basename() may return a pointer to internal
+ * storage which is reused in subsequent calls
+ * so copy the result.
+ */
+ base = strdup(basename(lname));
- if (!base)
- return;
+ free(lname);
- dso__set_short_name(dso, base, true);
+ if (!base)
+ return;
+ }
+ dso__set_short_name(dso, base, true);
}
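To make the effect of the sscanf() special case concrete, a small standalone sketch (not part of the patch; the sample paths are made up):

#include <stdio.h>

/* Illustrative only: mirrors the "/tmp/perf-%d.map" special case above. */
int main(void)
{
	const char *paths[] = { "/tmp/perf-1234.map", "/usr/lib/libfoo.so" };
	int tid;

	for (int i = 0; i < 2; i++) {
		if (sscanf(paths[i], "/tmp/perf-%d.map", &tid) == 1)
			printf("%s -> short name \"[JIT] tid %d\"\n",
			       paths[i], tid);
		else
			printf("%s -> basename() as before\n", paths[i]);
	}
	return 0;
}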
int dso__name_len(const struct dso *dso)
@@ -1195,10 +1205,10 @@ struct dso *dso__new(const char *name)
strcpy(dso->name, name);
dso__set_long_name(dso, dso->name, false);
dso__set_short_name(dso, dso->name, false);
- dso->symbols = dso->symbol_names = RB_ROOT;
+ dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
dso->data.cache = RB_ROOT;
- dso->inlined_nodes = RB_ROOT;
- dso->srclines = RB_ROOT;
+ dso->inlined_nodes = RB_ROOT_CACHED;
+ dso->srclines = RB_ROOT_CACHED;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1467,7 +1477,7 @@ size_t dso__fprintf(struct dso *dso, FILE *fp)
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 8c8a7abe809d..6e3f63781e51 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,13 +7,15 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
+#include <stdio.h>
#include "rwsem.h"
-#include <linux/types.h>
#include <linux/bitops.h>
-#include "map.h"
-#include "namespaces.h"
#include "build-id.h"
+struct machine;
+struct map;
+struct perf_env;
+
enum dso_binary_type {
DSO_BINARY_TYPE__KALLSYMS = 0,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
@@ -34,6 +36,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__KCORE,
DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__BPF_PROG_INFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -140,10 +143,10 @@ struct dso {
struct list_head node;
struct rb_node rb_node; /* rbtree node sorted by long name */
struct rb_root *root; /* root of rbtree that rb_node is in */
- struct rb_root symbols;
- struct rb_root symbol_names;
- struct rb_root inlined_nodes;
- struct rb_root srclines;
+ struct rb_root_cached symbols;
+ struct rb_root_cached symbol_names;
+ struct rb_root_cached inlined_nodes;
+ struct rb_root_cached srclines;
struct {
u64 addr;
struct symbol *symbol;
@@ -188,6 +191,12 @@ struct dso {
u64 debug_frame_offset;
u64 eh_frame_hdr_offset;
} data;
+ /* bpf prog information */
+ struct {
+ u32 id;
+ u32 sub_id;
+ struct perf_env *env;
+ } bpf_prog;
union { /* Tool specific area */
void *priv;
@@ -235,7 +244,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
- return !RB_EMPTY_ROOT(&dso->symbols);
+ return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
}
bool dso__sorted_by_name(const struct dso *dso);
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7eb7de5aee44..218bfea8f8a8 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* dwarf-aux.c : libdw auxiliary interfaces
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <errno.h>
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 8ac53bf1ec4e..0489b0cf8e2c 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _DWARF_AUX_H
#define _DWARF_AUX_H
/*
* dwarf-aux.h : libdw auxiliary interfaces
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <dwarf.h>
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..6a3eaf7d9353 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,167 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
+#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
+#include <bpf/libbpf.h>
struct perf_env perf_env;
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
+{
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.infos.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id) {
+ p = &(*p)->rb_left;
+ } else if (prog_id > node->info_linear->info.id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+ env->bpf_progs.infos_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id)
+{
+ struct bpf_prog_info_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.infos.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id)
+ n = n->rb_left;
+ else if (prog_id > node->info_linear->info.id)
+ n = n->rb_right;
+ else
+ goto out;
+ }
+ node = NULL;
+
+out:
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
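For context (not part of the patch), a sketch of how a consumer might use the lookup side; the helper name is hypothetical, and the name field is assumed to be the one from the kernel's struct bpf_prog_info:

/*
 * Hypothetical consumer of the rbtree above: resolve a prog id recorded
 * in a PERF_RECORD_BPF_EVENT to a printable name, falling back when the
 * sideband data did not capture that program.
 */
static const char *example_bpf_prog_name(struct perf_env *env, __u32 id)
{
	struct bpf_prog_info_node *node = perf_env__find_bpf_prog_info(env, id);

	return node ? (const char *)node->info_linear->info.name
		    : "[unknown bpf prog]";
}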
+
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+ struct rb_node *parent = NULL;
+ __u32 btf_id = btf_node->id;
+ struct btf_node *node;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.btfs.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct btf_node, rb_node);
+ if (btf_id < node->id) {
+ p = &(*p)->rb_left;
+ } else if (btf_id > node->id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated btf %u\n", btf_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&btf_node->rb_node, parent, p);
+ rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+ env->bpf_progs.btfs_cnt++;
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+ struct btf_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.btfs.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct btf_node, rb_node);
+ if (btf_id < node->id)
+ n = n->rb_left;
+ else if (btf_id > node->id)
+ n = n->rb_right;
+ else
+ goto out;
+ }
+ node = NULL;
+
+out:
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_write(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.infos_cnt = 0;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase(&node->rb_node, root);
+ free(node);
+ }
+
+ env->bpf_progs.btfs_cnt = 0;
+
+ up_write(&env->bpf_progs.lock);
+}
+
void perf_env__exit(struct perf_env *env)
{
int i;
+ perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -38,6 +190,13 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
+void perf_env__init(struct perf_env *env)
+{
+ env->bpf_progs.infos = RB_ROOT;
+ env->bpf_progs.btfs = RB_ROOT;
+ init_rwsem(&env->bpf_progs.lock);
+}
+
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..271a90b326c4 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
+#include <linux/rbtree.h>
#include "cpumap.h"
+#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@@ -60,12 +62,38 @@ struct perf_env {
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
int caches_cnt;
+ u32 comp_ratio;
+ u32 comp_ver;
+ u32 comp_type;
+ u32 comp_level;
+ u32 comp_mmap_len;
struct numa_node *numa_nodes;
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
+
+ /*
+ * bpf_info_lock protects bpf rbtrees. This is needed because the
+ * trees are accessed by different threads in perf-top
+ */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root infos;
+ u32 infos_cnt;
+ struct rb_root btfs;
+ u32 btfs_cnt;
+ } bpf_progs;
+};
+
+enum perf_compress_type {
+ PERF_COMP_NONE = 0,
+ PERF_COMP_ZSTD,
+ PERF_COMP_MAX
};
+struct bpf_prog_info_node;
+struct btf_node;
+
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@@ -80,4 +108,11 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
+void perf_env__init(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
+void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 937a5a4f71cc..d1ad6c419724 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -21,9 +21,13 @@
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
+#include "map.h"
+#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
+#include "session.h"
+#include "bpf-event.h"
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
@@ -45,6 +49,8 @@ static const char *perf_event__names[] = {
[PERF_RECORD_SWITCH] = "SWITCH",
[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
[PERF_RECORD_NAMESPACES] = "NAMESPACES",
+ [PERF_RECORD_KSYMBOL] = "KSYMBOL",
+ [PERF_RECORD_BPF_EVENT] = "BPF_EVENT",
[PERF_RECORD_HEADER_ATTR] = "ATTR",
[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
@@ -62,6 +68,7 @@ static const char *perf_event__names[] = {
[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
[PERF_RECORD_TIME_CONV] = "TIME_CONV",
[PERF_RECORD_HEADER_FEATURE] = "FEATURE",
+ [PERF_RECORD_COMPRESSED] = "COMPRESSED",
};
static const char *perf_ns__names[] = {
@@ -1329,6 +1336,22 @@ int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
return machine__process_switch_event(machine, event);
}
+int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_ksymbol(machine, event, sample);
+}
+
+int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine)
+{
+ return machine__process_bpf_event(machine, event, sample);
+}
+
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -1461,6 +1484,21 @@ static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
}
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " ksymbol event with addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
+ event->ksymbol_event.addr, event->ksymbol_event.len,
+ event->ksymbol_event.ksym_type,
+ event->ksymbol_event.flags, event->ksymbol_event.name);
+}
+
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " bpf event with type %u, flags %u, id %u\n",
+ event->bpf_event.type, event->bpf_event.flags,
+ event->bpf_event.id);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1496,6 +1534,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_LOST:
ret += perf_event__fprintf_lost(event, fp);
break;
+ case PERF_RECORD_KSYMBOL:
+ ret += perf_event__fprintf_ksymbol(event, fp);
+ break;
+ case PERF_RECORD_BPF_EVENT:
+ ret += perf_event__fprintf_bpf_event(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index eb95f3384958..9e999550f247 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -5,6 +5,8 @@
#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
#include "../perf.h"
#include "build-id.h"
@@ -84,6 +86,29 @@ struct throttle_event {
u64 stream_id;
};
+#ifndef KSYM_NAME_LEN
+#define KSYM_NAME_LEN 256
+#endif
+
+struct ksymbol_event {
+ struct perf_event_header header;
+ u64 addr;
+ u32 len;
+ u16 ksym_type;
+ u16 flags;
+ char name[KSYM_NAME_LEN];
+};
+
+struct bpf_event {
+ struct perf_event_header header;
+ u16 type;
+ u16 flags;
+ u32 id;
+
+ /* for bpf_prog types */
+ u8 tag[BPF_TAG_SIZE]; // prog tag
+};
+
#define PERF_SAMPLE_MASK \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
@@ -137,26 +162,7 @@ struct ip_callchain {
u64 ips[0];
};
-struct branch_flags {
- u64 mispred:1;
- u64 predicted:1;
- u64 in_tx:1;
- u64 abort:1;
- u64 cycles:16;
- u64 type:4;
- u64 reserved:40;
-};
-
-struct branch_entry {
- u64 from;
- u64 to;
- struct branch_flags flags;
-};
-
-struct branch_stack {
- u64 nr;
- struct branch_entry entries[0];
-};
+struct branch_stack;
enum {
PERF_IP_FLAG_BRANCH = 1ULL << 0,
@@ -249,6 +255,7 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_EVENT_UPDATE = 78,
PERF_RECORD_TIME_CONV = 79,
PERF_RECORD_HEADER_FEATURE = 80,
+ PERF_RECORD_COMPRESSED = 81,
PERF_RECORD_HEADER_MAX
};
@@ -527,8 +534,9 @@ struct auxtrace_error_event {
u32 cpu;
u32 pid;
u32 tid;
- u32 reserved__; /* For alignment */
+ u32 fmt;
u64 ip;
+ u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
};
@@ -620,6 +628,11 @@ struct feature_event {
char data[];
};
+struct compressed_event {
+ struct perf_event_header header;
+ char data[];
+};
+
union perf_event {
struct perf_event_header header;
struct mmap_event mmap;
@@ -651,6 +664,9 @@ union perf_event {
struct stat_round_event stat_round;
struct time_conv_event time_conv;
struct feature_event feat;
+ struct ksymbol_event ksymbol_event;
+ struct bpf_event bpf_event;
+ struct compressed_event pack;
};
void perf_event__print_totals(void);
@@ -748,6 +764,14 @@ int perf_event__process_exit(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+int perf_event__process_ksymbol(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
+int perf_event__process_bpf_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
union perf_event *event,
struct machine *machine,
@@ -811,6 +835,8 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
int kallsyms__get_function_start(const char *kallsyms_filename,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8c902276d4b4..a474ede17cd6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from builtin-{top,stat,record}.c, see those files for further
* copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
#include <api/fs/fs.h>
@@ -19,6 +18,7 @@
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
@@ -230,20 +230,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
}
}
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
-{
- attr->precise_ip = 3;
-
- while (attr->precise_ip != 0) {
- int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
- if (fd != -1) {
- close(fd);
- break;
- }
- --attr->precise_ip;
- }
-}
-
int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
{
struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
@@ -1022,7 +1008,8 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
+ int comp_level)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1019,8 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
+ .comp_level = comp_level };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1052,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1841,3 +1829,125 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
}
return leader;
}
+
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data)
+{
+ struct perf_evsel *evsel;
+ bool new_evlist = (*evlist) == NULL;
+
+ if (*evlist == NULL)
+ *evlist = perf_evlist__new();
+ if (*evlist == NULL)
+ return -1;
+
+ if (!attr->sample_id_all) {
+ pr_warning("enabling sample_id_all for all side band events\n");
+ attr->sample_id_all = 1;
+ }
+
+ evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
+ if (!evsel)
+ goto out_err;
+
+ evsel->side_band.cb = cb;
+ evsel->side_band.data = data;
+ perf_evlist__add(*evlist, evsel);
+ return 0;
+
+out_err:
+ if (new_evlist) {
+ perf_evlist__delete(*evlist);
+ *evlist = NULL;
+ }
+ return -1;
+}
+
+static void *perf_evlist__poll_thread(void *arg)
+{
+ struct perf_evlist *evlist = arg;
+ bool draining = false;
+ int i, done = 0;
+
+ while (!done) {
+ bool got_data = false;
+
+ if (evlist->thread.done)
+ draining = true;
+
+ if (!draining)
+ perf_evlist__poll(evlist, 1000);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &evlist->mmap[i];
+ union perf_event *event;
+
+ if (perf_mmap__read_init(map))
+ continue;
+ while ((event = perf_mmap__read_event(map)) != NULL) {
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (evsel && evsel->side_band.cb)
+ evsel->side_band.cb(event, evsel->side_band.data);
+ else
+ pr_warning("cannot locate proper evsel for the side band event\n");
+
+ perf_mmap__consume(map);
+ got_data = true;
+ }
+ perf_mmap__read_done(map);
+ }
+
+ if (draining && !got_data)
+ break;
+ }
+ return NULL;
+}
+
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target)
+{
+ struct perf_evsel *counter;
+
+ if (!evlist)
+ return 0;
+
+ if (perf_evlist__create_maps(evlist, target))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__open(counter, evlist->cpus,
+ evlist->threads) < 0)
+ goto out_delete_evlist;
+ }
+
+ if (perf_evlist__mmap(evlist, UINT_MAX))
+ goto out_delete_evlist;
+
+ evlist__for_each_entry(evlist, counter) {
+ if (perf_evsel__enable(counter))
+ goto out_delete_evlist;
+ }
+
+ evlist->thread.done = 0;
+ if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
+ goto out_delete_evlist;
+
+ return 0;
+
+out_delete_evlist:
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ return -1;
+}
+
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
+{
+ if (!evlist)
+ return;
+ evlist->thread.done = 1;
+ pthread_join(evlist->thread.th, NULL);
+ perf_evlist__delete(evlist);
+}
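
The three functions added above cover the whole life cycle of a "side band" evlist: perf_evlist__add_sb_event() registers an attribute plus callback on a (possibly still NULL) evlist, perf_evlist__start_sb_thread() opens, mmaps and enables the events and spawns perf_evlist__poll_thread(), and perf_evlist__stop_sb_thread() flags thread.done and joins. A minimal caller might look like the sketch below; the callback, the dummy-event attribute and the setup_sb() wrapper are illustrative assumptions, not part of this patch.

static int handle_sb_event(union perf_event *event, void *data)
{
	/* e.g. stash PERF_RECORD_BPF_EVENT details for later symbol resolution */
	(void)event;
	(void)data;
	return 0;
}

static int setup_sb(struct target *target)
{
	struct perf_evlist *sb_evlist = NULL;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_DUMMY,
		.bpf_event	= 1,		/* deliver PERF_RECORD_BPF_EVENT */
		.sample_id_all	= 1,
	};

	if (perf_evlist__add_sb_event(&sb_evlist, &attr, handle_sb_event, NULL))
		return -1;
	if (perf_evlist__start_sb_thread(sb_evlist, target))
		return -1;

	/* ... profiling runs; the poll thread drains the side-band mmaps ... */

	perf_evlist__stop_sb_thread(sb_evlist);
	return 0;
}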
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..49354fe24d5f 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -49,8 +49,15 @@ struct perf_evlist {
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ void (*trace_event_sample_raw)(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
u64 first_sample_time;
u64 last_sample_time;
+ struct {
+ pthread_t th;
+ volatile int done;
+ } thread;
};
struct perf_evsel_str_handler {
@@ -84,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
int perf_evlist__add_dummy(struct perf_evlist *evlist);
+int perf_evlist__add_sb_event(struct perf_evlist **evlist,
+ struct perf_event_attr *attr,
+ perf_evsel__sb_cb_t cb,
+ void *data);
+int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
+ struct target *target);
+void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler);
@@ -162,7 +177,8 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks,
+ int affinity, int flush, int comp_level);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
@@ -300,8 +316,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
struct perf_evsel *tracking_evsel);
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
-
struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
@@ -314,5 +328,4 @@ void perf_evlist__force_leader(struct perf_evlist *evlist);
struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
struct perf_evsel *evsel);
-
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index dbc0466db368..4a5947625c5c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from builtin-{top,stat,record}.c, see those files for further
* copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <byteswap.h>
@@ -294,25 +293,18 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
if (!precise)
goto new_event;
- /*
- * Unnamed union member, not supported as struct member named
- * initializer in older compilers such as gcc 4.4.7
- *
- * Just for probing the precise_ip:
- */
- attr.sample_period = 1;
- perf_event_attr__set_max_precise_ip(&attr);
/*
* Now let the usual logic to set up the perf_event_attr defaults
* to kick in when we return and before perf_evsel__open() is called.
*/
- attr.sample_period = 0;
new_event:
evsel = perf_evsel__new(&attr);
if (evsel == NULL)
goto out;
+ evsel->precise_max = true;
+
/* use asprintf() because free(evsel) assumes name is allocated */
if (asprintf(&evsel->name, "cycles%s%s%.*s",
(attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -587,6 +579,12 @@ static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}
+static int perf_evsel__tool_name(char *bf, size_t size)
+{
+ int ret = scnprintf(bf, size, "duration_time");
+ return ret;
+}
+
const char *perf_evsel__name(struct perf_evsel *evsel)
{
char bf[128];
@@ -608,7 +606,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
break;
case PERF_TYPE_SOFTWARE:
- perf_evsel__sw_name(evsel, bf, sizeof(bf));
+ if (evsel->tool_event)
+ perf_evsel__tool_name(bf, sizeof(bf));
+ else
+ perf_evsel__sw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_TRACEPOINT:
@@ -811,6 +812,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
break;
case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
break;
+ case PERF_EVSEL__CONFIG_TERM_PERCORE:
+ break;
default:
break;
}
@@ -956,6 +959,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
+
+ /*
+ * We don't get sample for slave events, we make them
+ * when delivering group leader sample. Set the slave
+ * event to follow the master sample_type to ease up
+ * report.
+ */
+ attr->sample_type = leader->attr.sample_type;
}
if (opts->no_samples)
@@ -1035,6 +1046,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
+ attr->ksymbol = track && !perf_missing_features.ksymbol;
+ attr->bpf_event = track && !opts->no_bpf_event &&
+ !perf_missing_features.bpf_event;
if (opts->record_namespaces)
attr->namespaces = track;
@@ -1080,7 +1094,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
}
if (evsel->precise_max)
- perf_event_attr__set_max_precise_ip(attr);
+ attr->precise_ip = 3;
if (opts->all_user) {
attr->exclude_kernel = 1;
@@ -1289,6 +1303,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
+ perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
@@ -1339,10 +1354,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
count->val = 0;
} else if (count->run < count->ena) {
scaled = 1;
- count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
+ count->val = (u64)((double) count->val * count->ena / count->run);
}
- } else
- count->ena = count->run = 0;
+ }
if (pscaled)
*pscaled = scaled;
@@ -1652,6 +1666,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
PRINT_ATTRf(namespaces, p_unsigned);
+ PRINT_ATTRf(ksymbol, p_unsigned);
+ PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1744,6 +1760,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
return true;
}
+static void display_attr(struct perf_event_attr *attr)
+{
+ if (verbose >= 2) {
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ fprintf(stderr, "perf_event_attr:\n");
+ perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
+ fprintf(stderr, "%.60s\n", graph_dotted_line);
+ }
+}
+
+static int perf_event_open(struct perf_evsel *evsel,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ int precise_ip = evsel->attr.precise_ip;
+ int fd;
+
+ while (1) {
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+ pid, cpu, group_fd, flags);
+
+ fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+ if (fd >= 0)
+ break;
+
+ /*
+ * Do quick precise_ip fallback if:
+ * - there is precise_ip set in perf_event_attr
+ * - maximum precise is requested
+ * - sys_perf_event_open failed with ENOTSUP error,
+ * which is associated with wrong precise_ip
+ */
+ if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+ break;
+
+ /*
+ * We tried all the precise_ip values, and it's
+ * still failing, so leave it to standard fallback.
+ */
+ if (!evsel->attr.precise_ip) {
+ evsel->attr.precise_ip = precise_ip;
+ break;
+ }
+
+ pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+ evsel->attr.precise_ip--;
+ pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
+ display_attr(&evsel->attr);
+ }
+
+ return fd;
+}
+
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
@@ -1811,16 +1880,15 @@ fallback_missing_features:
PERF_SAMPLE_BRANCH_NO_CYCLES);
if (perf_missing_features.group_read && evsel->attr.inherit)
evsel->attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
+ if (perf_missing_features.ksymbol)
+ evsel->attr.ksymbol = 0;
+ if (perf_missing_features.bpf_event)
+ evsel->attr.bpf_event = 0;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
- if (verbose >= 2) {
- fprintf(stderr, "%.60s\n", graph_dotted_line);
- fprintf(stderr, "perf_event_attr:\n");
- perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
- fprintf(stderr, "%.60s\n", graph_dotted_line);
- }
+ display_attr(&evsel->attr);
for (cpu = 0; cpu < cpus->nr; cpu++) {
@@ -1832,13 +1900,10 @@ retry_sample_id:
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
- pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, cpus->map[cpu], group_fd, flags);
-
test_attr__ready();
- fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
- group_fd, flags);
+ fd = perf_event_open(evsel, pid, cpus->map[cpu],
+ group_fd, flags);
FD(evsel, cpu, thread) = fd;
@@ -1930,7 +1995,15 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+ if (!perf_missing_features.bpf_event && evsel->attr.bpf_event) {
+ perf_missing_features.bpf_event = true;
+ pr_debug2("switching off bpf_event\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.ksymbol && evsel->attr.ksymbol) {
+ perf_missing_features.ksymbol = true;
+ pr_debug2("switching off ksymbol\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
perf_missing_features.write_backward = true;
pr_debug2("switching off write_backward\n");
goto out_close;
@@ -2305,7 +2378,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (data->user_regs.abi) {
u64 mask = evsel->attr.sample_regs_user;
- sz = hweight_long(mask) * sizeof(u64);
+ sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->user_regs.mask = mask;
data->user_regs.regs = (u64 *)array;
@@ -2361,7 +2434,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
u64 mask = evsel->attr.sample_regs_intr;
- sz = hweight_long(mask) * sizeof(u64);
+ sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
data->intr_regs.mask = mask;
data->intr_regs.regs = (u64 *)array;
@@ -2489,7 +2562,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -2517,7 +2590,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
result += sizeof(u64);
- sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
result += sz;
} else {
result += sizeof(u64);
@@ -2647,7 +2720,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_REGS_USER) {
if (sample->user_regs.abi) {
*array++ = sample->user_regs.abi;
- sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->user_regs.mask) * sizeof(u64);
memcpy(array, sample->user_regs.regs, sz);
array = (void *)array + sz;
} else {
@@ -2683,7 +2756,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_REGS_INTR) {
if (sample->intr_regs.abi) {
*array++ = sample->intr_regs.abi;
- sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
+ sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
memcpy(array, sample->intr_regs.regs, sz);
array = (void *)array + sz;
} else {
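
The hweight_long() to hweight64() switches above matter for 32-bit builds of perf: sample_regs_user and sample_regs_intr are u64 bitmasks, and hweight_long() only counts the low 32 bits there, under-sizing the saved register block. A small illustration (values picked for the example):

u64 mask = 0x10000000fULL;			/* bits 0-3 and bit 32: five sampled registers */
size_t sz = hweight64(mask) * sizeof(u64);	/* 5 * 8 = 40 bytes on any host */
/* hweight_long(mask) on a 32-bit host would see only bits 0-3 and yield 32 */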
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 82a289ce8b0c..cad54e8ba522 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -8,7 +8,7 @@
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
-#include "symbol.h"
+#include "symbol_conf.h"
#include "cpumap.h"
#include "counts.h"
@@ -50,6 +50,7 @@ enum term_type {
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_BRANCH,
+ PERF_EVSEL__CONFIG_TERM_PERCORE,
};
struct perf_evsel_config_term {
@@ -67,12 +68,20 @@ struct perf_evsel_config_term {
bool overwrite;
char *branch;
unsigned long max_events;
+ bool percore;
} val;
bool weak;
};
struct perf_stat_evsel;
+typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
+
+enum perf_tool_event {
+ PERF_TOOL_NONE = 0,
+ PERF_TOOL_DURATION_TIME = 1,
+};
+
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -119,6 +128,7 @@ struct perf_evsel {
unsigned int sample_size;
int id_pos;
int is_pos;
+ enum perf_tool_event tool_event;
bool uniquified_name;
bool snapshot;
bool supported;
@@ -150,7 +160,12 @@ struct perf_evsel {
struct perf_evsel **metric_events;
bool collect_stat;
bool weak_group;
+ bool percore;
const char *pmu_name;
+ struct {
+ perf_evsel__sb_cb_t *cb;
+ void *data;
+ } side_band;
};
union u64_swap {
@@ -168,6 +183,8 @@ struct perf_missing_features {
bool lbr_flags;
bool write_backward;
bool group_read;
+ bool ksymbol;
+ bool bpf_event;
};
extern struct perf_missing_features perf_missing_features;
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index aafbe54fd3fa..7001247ebbd6 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* genelf.c
* Copyright (C) 2014, Google, Inc
*
* Contributed by:
* Stephane Eranian <eranian@gmail.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <sys/types.h>
diff --git a/tools/perf/util/genelf_debug.c b/tools/perf/util/genelf_debug.c
index 40789d8603d0..995e490c17fa 100644
--- a/tools/perf/util/genelf_debug.c
+++ b/tools/perf/util/genelf_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* genelf_debug.c
* Copyright (C) 2015, Google, Inc
@@ -5,8 +6,6 @@
* Contributed by:
* Stephane Eranian <eranian@google.com>
*
- * Released under the GPL v2.
- *
* based on GPLv2 source code from Oprofile
* @remark Copyright 2007 OProfile authors
* @author Philippe Elie
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index dec6d218c31c..fb0aa661644b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -18,6 +18,7 @@
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
+#include <bpf/libbpf.h>
#include "evlist.h"
#include "evsel.h"
@@ -39,6 +40,8 @@
#include "tool.h"
#include "time-utils.h"
#include "units.h"
+#include "cputopo.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
@@ -526,17 +529,11 @@ static int write_event_desc(struct feat_fd *ff,
static int write_cmdline(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char buf[MAXPATHLEN];
- u32 n;
- int i, ret;
+ char pbuf[MAXPATHLEN], *buf;
+ int i, ret, n;
/* actual path to perf binary */
- ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
- if (ret <= 0)
- return -1;
-
- /* readlink() does not add null termination */
- buf[ret] = '\0';
+ buf = perf_exe(pbuf, MAXPATHLEN);
/* account for binary path */
n = perf_env.nr_cmdline + 1;
@@ -557,160 +554,15 @@ static int write_cmdline(struct feat_fd *ff,
return 0;
}
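
write_cmdline() now takes the running binary's path from the perf_exe() helper instead of open-coding readlink(). The helper lives in util.c and is not part of this hunk; it presumably looks something like the sketch below (an assumption, kept here because it preserves the manual NUL termination that the removed comment used to call out; a real build would pull in <unistd.h> and <string.h>):

static char *perf_exe(char *buf, int len)
{
	int n = readlink("/proc/self/exe", buf, len - 1);

	if (n > 0) {
		buf[n] = 0;		/* readlink() does not NUL-terminate */
		return buf;
	}
	return strcpy(buf, "perf");	/* assumed fallback to the bare tool name */
}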
-#define CORE_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
-#define THRD_SIB_FMT \
- "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
-
-struct cpu_topo {
- u32 cpu_nr;
- u32 core_sib;
- u32 thread_sib;
- char **core_siblings;
- char **thread_siblings;
-};
-
-static int build_cpu_topo(struct cpu_topo *tp, int cpu)
-{
- FILE *fp;
- char filename[MAXPATHLEN];
- char *buf = NULL, *p;
- size_t len = 0;
- ssize_t sret;
- u32 i = 0;
- int ret = -1;
-
- sprintf(filename, CORE_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto try_threads;
-
- sret = getline(&buf, &len, fp);
- fclose(fp);
- if (sret <= 0)
- goto try_threads;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->core_sib; i++) {
- if (!strcmp(buf, tp->core_siblings[i]))
- break;
- }
- if (i == tp->core_sib) {
- tp->core_siblings[i] = buf;
- tp->core_sib++;
- buf = NULL;
- len = 0;
- }
- ret = 0;
-
-try_threads:
- sprintf(filename, THRD_SIB_FMT, cpu);
- fp = fopen(filename, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- for (i = 0; i < tp->thread_sib; i++) {
- if (!strcmp(buf, tp->thread_siblings[i]))
- break;
- }
- if (i == tp->thread_sib) {
- tp->thread_siblings[i] = buf;
- tp->thread_sib++;
- buf = NULL;
- }
- ret = 0;
-done:
- if(fp)
- fclose(fp);
- free(buf);
- return ret;
-}
-
-static void free_cpu_topo(struct cpu_topo *tp)
-{
- u32 i;
-
- if (!tp)
- return;
-
- for (i = 0 ; i < tp->core_sib; i++)
- zfree(&tp->core_siblings[i]);
-
- for (i = 0 ; i < tp->thread_sib; i++)
- zfree(&tp->thread_siblings[i]);
-
- free(tp);
-}
-
-static struct cpu_topo *build_cpu_topology(void)
-{
- struct cpu_topo *tp = NULL;
- void *addr;
- u32 nr, i;
- size_t sz;
- long ncpus;
- int ret = -1;
- struct cpu_map *map;
-
- ncpus = cpu__max_present_cpu();
-
- /* build online CPU map */
- map = cpu_map__new(NULL);
- if (map == NULL) {
- pr_debug("failed to get system cpumap\n");
- return NULL;
- }
-
- nr = (u32)(ncpus & UINT_MAX);
-
- sz = nr * sizeof(char *);
- addr = calloc(1, sizeof(*tp) + 2 * sz);
- if (!addr)
- goto out_free;
-
- tp = addr;
- tp->cpu_nr = nr;
- addr += sizeof(*tp);
- tp->core_siblings = addr;
- addr += sz;
- tp->thread_siblings = addr;
-
- for (i = 0; i < nr; i++) {
- if (!cpu_map__has(map, i))
- continue;
-
- ret = build_cpu_topo(tp, i);
- if (ret < 0)
- break;
- }
-
-out_free:
- cpu_map__put(map);
- if (ret) {
- free_cpu_topo(tp);
- tp = NULL;
- }
- return tp;
-}
static int write_cpu_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- struct cpu_topo *tp;
+ struct cpu_topology *tp;
u32 i;
int ret, j;
- tp = build_cpu_topology();
+ tp = cpu_topology__new();
if (!tp)
return -1;
@@ -748,7 +600,7 @@ static int write_cpu_topology(struct feat_fd *ff,
return ret;
}
done:
- free_cpu_topo(tp);
+ cpu_topology__delete(tp);
return ret;
}
@@ -783,112 +635,45 @@ static int write_total_mem(struct feat_fd *ff,
return ret;
}
-static int write_topo_node(struct feat_fd *ff, int node)
-{
- char str[MAXPATHLEN];
- char field[32];
- char *buf = NULL, *p;
- size_t len = 0;
- FILE *fp;
- u64 mem_total, mem_free, mem;
- int ret = -1;
-
- sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
- fp = fopen(str, "r");
- if (!fp)
- return -1;
-
- while (getline(&buf, &len, fp) > 0) {
- /* skip over invalid lines */
- if (!strchr(buf, ':'))
- continue;
- if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
- goto done;
- if (!strcmp(field, "MemTotal:"))
- mem_total = mem;
- if (!strcmp(field, "MemFree:"))
- mem_free = mem;
- }
-
- fclose(fp);
- fp = NULL;
-
- ret = do_write(ff, &mem_total, sizeof(u64));
- if (ret)
- goto done;
-
- ret = do_write(ff, &mem_free, sizeof(u64));
- if (ret)
- goto done;
-
- ret = -1;
- sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
-
- fp = fopen(str, "r");
- if (!fp)
- goto done;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
-
- p = strchr(buf, '\n');
- if (p)
- *p = '\0';
-
- ret = do_write_string(ff, buf);
-done:
- free(buf);
- if (fp)
- fclose(fp);
- return ret;
-}
-
static int write_numa_topology(struct feat_fd *ff,
struct perf_evlist *evlist __maybe_unused)
{
- char *buf = NULL;
- size_t len = 0;
- FILE *fp;
- struct cpu_map *node_map = NULL;
- char *c;
- u32 nr, i, j;
+ struct numa_topology *tp;
int ret = -1;
+ u32 i;
- fp = fopen("/sys/devices/system/node/online", "r");
- if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ tp = numa_topology__new();
+ if (!tp)
+ return -ENOMEM;
- c = strchr(buf, '\n');
- if (c)
- *c = '\0';
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
- node_map = cpu_map__new(buf);
- if (!node_map)
- goto done;
+ for (i = 0; i < tp->nr; i++) {
+ struct numa_topology_node *n = &tp->nodes[i];
- nr = (u32)node_map->nr;
+ ret = do_write(ff, &n->node, sizeof(u32));
+ if (ret < 0)
+ goto err;
- ret = do_write(ff, &nr, sizeof(nr));
- if (ret < 0)
- goto done;
+ ret = do_write(ff, &n->mem_total, sizeof(u64));
+ if (ret)
+ goto err;
- for (i = 0; i < nr; i++) {
- j = (u32)node_map->map[i];
- ret = do_write(ff, &j, sizeof(j));
- if (ret < 0)
- break;
+ ret = do_write(ff, &n->mem_free, sizeof(u64));
+ if (ret)
+ goto err;
- ret = write_topo_node(ff, i);
+ ret = do_write_string(ff, n->cpus);
if (ret < 0)
- break;
+ goto err;
}
-done:
- free(buf);
- fclose(fp);
- cpu_map__put(node_map);
+
+ ret = 0;
+
+err:
+ numa_topology__delete(tp);
return ret;
}
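
write_numa_topology() now delegates the sysfs parsing to the new cputopo helpers and only serializes the result. Per the do_write() calls above, the feature starts with a u32 node count and then carries one record per node, roughly as below (hypothetical struct name, shown only to document the byte layout):

struct numa_node_record {		/* illustration only */
	u32 node;			/* node number */
	u64 mem_total;			/* node memory totals taken from meminfo */
	u64 mem_free;
	/* followed by the node's cpu list, emitted via do_write_string() */
};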
@@ -1042,11 +827,9 @@ static int write_cpuid(struct feat_fd *ff,
int ret;
ret = get_cpuid(buffer, sizeof(buffer));
- if (!ret)
- goto write_it;
+ if (ret)
+ return -1;
- return -1;
-write_it:
return do_write_string(ff, buffer);
}
@@ -1080,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
sizeof(ff->ph->env.clockid_res_ns));
}
+static int write_dir_format(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_write(ff, &data->dir.version, sizeof(data->dir.version));
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.infos_cnt,
+ sizeof(env->bpf_progs.infos_cnt));
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+ while (next) {
+ struct bpf_prog_info_node *node;
+ size_t len;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ len = sizeof(struct bpf_prog_info_linear) +
+ node->info_linear->data_len;
+
+ /* before writing to file, translate address to offset */
+ bpf_program__bpil_addr_to_offs(node->info_linear);
+ ret = do_write(ff, node->info_linear, len);
+ /*
+ * translate back to address even when do_write() fails,
+ * so that this function never changes the data.
+ */
+ bpf_program__bpil_offs_to_addr(node->info_linear);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int write_bpf_btf(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+ int ret;
+
+ down_read(&env->bpf_progs.lock);
+
+ ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+ sizeof(env->bpf_progs.btfs_cnt));
+
+ if (ret < 0)
+ goto out;
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ ret = do_write(ff, &node->id,
+ sizeof(u32) * 2 + node->data_size);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ up_read(&env->bpf_progs.lock);
+ return ret;
+}
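
Both new features serialize as a u32 object count followed by one record per rb-tree node. BPF_PROG_INFO stores each bpf_prog_info_linear blob with its embedded pointers rewritten as offsets (bpf_program__bpil_addr_to_offs() before the write, the inverse after the read), and BPF_BTF stores struct btf_node starting at its id field, i.e. roughly the layout below (hypothetical struct name, inferred from the sizeof(u32) * 2 + data_size write above):

struct btf_feature_record {		/* illustration only */
	u32 id;				/* BTF object id */
	u32 data_size;			/* length of the raw BTF that follows */
	/* data_size bytes of raw BTF data */
};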
+
static int cpu_cache_level__sort(const void *a, const void *b)
{
struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1463,6 +1344,30 @@ out:
return ret;
}
+static int write_compressed(struct feat_fd *ff __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ int ret;
+
+ ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
+ if (ret)
+ return ret;
+
+ ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
+ if (ret)
+ return ret;
+
+ ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
+ if (ret)
+ return ret;
+
+ ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
+ if (ret)
+ return ret;
+
+ return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
+}
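
Together with process_compressed() further down, this pins the COMPRESSED feature to five consecutive u32 fields. A sketch of the on-disk order (hypothetical struct, for illustration only):

struct compressed_feature {
	u32 version;			/* env.comp_ver */
	u32 type;			/* e.g. PERF_COMP_ZSTD */
	u32 level;
	u32 ratio;
	u32 mmap_len;
};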
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1560,6 +1465,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
ff->ph->env.clockid_res_ns * 1000);
}
+static void print_dir_format(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
+}
+
+static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+
+ bpf_event__print_bpf_prog_info(&node->info_linear->info,
+ env, fp);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_read(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.btfs;
+ next = rb_first(root);
+
+ while (next) {
+ struct btf_node *node;
+
+ node = rb_entry(next, struct btf_node, rb_node);
+ next = rb_next(&node->rb_node);
+ fprintf(fp, "# btf info of id %u\n", node->id);
+ }
+
+ up_read(&env->bpf_progs.lock);
+}
+
static void free_event_desc(struct perf_evsel *events)
{
struct perf_evsel *evsel;
@@ -1750,6 +1712,13 @@ static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
}
}
+static void print_compressed(struct feat_fd *ff, FILE *fp)
+{
+ fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
+ ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
+ ff->ph->env.comp_level, ff->ph->env.comp_ratio);
+}
+
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
@@ -2592,6 +2561,164 @@ static int process_clockid(struct feat_fd *ff,
return 0;
}
+static int process_dir_format(struct feat_fd *ff,
+ void *_data __maybe_unused)
+{
+ struct perf_session *session;
+ struct perf_data *data;
+
+ session = container_of(ff->ph, struct perf_session, header);
+ data = session->data;
+
+ if (WARN_ON(!perf_data__is_dir(data)))
+ return -1;
+
+ return do_read_u64(ff, &data->dir.version);
+}
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info_node *info_node;
+ struct perf_env *env = &ff->ph->env;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 info_len, data_len;
+
+ info_linear = NULL;
+ info_node = NULL;
+ if (do_read_u32(ff, &info_len))
+ goto out;
+ if (do_read_u32(ff, &data_len))
+ goto out;
+
+ if (info_len > sizeof(struct bpf_prog_info)) {
+ pr_warning("detected invalid bpf_prog_info\n");
+ goto out;
+ }
+
+ info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
+ data_len);
+ if (!info_linear)
+ goto out;
+ info_linear->info_len = sizeof(struct bpf_prog_info);
+ info_linear->data_len = data_len;
+ if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
+ goto out;
+ if (__do_read(ff, &info_linear->info, info_len))
+ goto out;
+ if (info_len < sizeof(struct bpf_prog_info))
+ memset(((void *)(&info_linear->info)) + info_len, 0,
+ sizeof(struct bpf_prog_info) - info_len);
+
+ if (__do_read(ff, info_linear->data, data_len))
+ goto out;
+
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+ if (!info_node)
+ goto out;
+
+ /* after reading from file, translate offset to address */
+ bpf_program__bpil_offs_to_addr(info_linear);
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ }
+
+ up_write(&env->bpf_progs.lock);
+ return 0;
+out:
+ free(info_linear);
+ free(info_node);
+ up_write(&env->bpf_progs.lock);
+ return err;
+}
+#else // HAVE_LIBBPF_SUPPORT
+static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_env *env = &ff->ph->env;
+ struct btf_node *node = NULL;
+ u32 count, i;
+ int err = -1;
+
+ if (ff->ph->needs_swap) {
+ pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ return 0;
+ }
+
+ if (do_read_u32(ff, &count))
+ return -1;
+
+ down_write(&env->bpf_progs.lock);
+
+ for (i = 0; i < count; ++i) {
+ u32 id, data_size;
+
+ if (do_read_u32(ff, &id))
+ goto out;
+ if (do_read_u32(ff, &data_size))
+ goto out;
+
+ node = malloc(sizeof(struct btf_node) + data_size);
+ if (!node)
+ goto out;
+
+ node->id = id;
+ node->data_size = data_size;
+
+ if (__do_read(ff, node->data, data_size))
+ goto out;
+
+ perf_env__insert_btf(env, node);
+ node = NULL;
+ }
+
+ err = 0;
+out:
+ up_write(&env->bpf_progs.lock);
+ free(node);
+ return err;
+}
+
+static int process_compressed(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
+ return -1;
+
+ if (do_read_u32(ff, &(ff->ph->env.comp_type)))
+ return -1;
+
+ if (do_read_u32(ff, &(ff->ph->env.comp_level)))
+ return -1;
+
+ if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
+ return -1;
+
+ if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
+ return -1;
+
+ return 0;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2651,7 +2778,11 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(CACHE, cache, true),
FEAT_OPR(SAMPLE_TIME, sample_time, false),
FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
- FEAT_OPR(CLOCKID, clockid, false)
+ FEAT_OPR(CLOCKID, clockid, false),
+ FEAT_OPN(DIR_FORMAT, dir_format, false),
+ FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
+ FEAT_OPR(BPF_BTF, bpf_btf, false),
+ FEAT_OPR(COMPRESSED, compressed, false),
};
struct header_print_data {
@@ -3471,6 +3602,7 @@ int perf_event__synthesize_features(struct perf_tool *tool,
return -ENOMEM;
ff.size = sz - sz_hdr;
+ ff.ph = &session->header;
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
if (!feat_ops[feat].synthesize) {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 0d553ddca0a3..5b3abe4172e2 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,10 @@ enum {
HEADER_SAMPLE_TIME,
HEADER_MEM_TOPOLOGY,
HEADER_CLOCKID,
+ HEADER_DIR_FORMAT,
+ HEADER_BPF_PROG_INFO,
+ HEADER_BPF_BTF,
+ HEADER_COMPRESSED,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -48,6 +52,10 @@ enum perf_header_version {
PERF_HEADER_VERSION_2,
};
+enum perf_dir_version {
+ PERF_DIR_VERSION = 1,
+};
+
struct perf_file_section {
u64 offset;
u64 size;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8aad8330e392..7ace7a10054d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "callchain.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
@@ -11,12 +12,14 @@
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
+#include "symbol.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
+#include <linux/time64.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he);
@@ -190,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_TIME, 12);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -209,7 +213,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
int row = 0;
@@ -244,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
}
}
+static long hist_time(unsigned long htime)
+{
+ unsigned long time_quantum = symbol_conf.time_quantum;
+ if (time_quantum)
+ return (htime / time_quantum) * time_quantum;
+ return htime;
+}
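
hist_time() rounds a sample timestamp down to a multiple of symbol_conf.time_quantum, so all samples falling in the same window collapse into a single hist entry for the new time sort key. A quick worked example (the 100ms quantum is just an assumed setting):

unsigned long q = 100 * NSEC_PER_MSEC;	/* 100000000 ns buckets */
unsigned long t = 1234567890UL;		/* sample timestamp in ns */
unsigned long b = (t / q) * q;		/* 1200000000, the start of t's bucket */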
+
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
u64 weight)
{
@@ -296,7 +308,7 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
if (!he->leaf) {
struct hist_entry *child;
- struct rb_node *node = rb_first(&he->hroot_out);
+ struct rb_node *node = rb_first_cached(&he->hroot_out);
while (node) {
child = rb_entry(node, struct hist_entry, rb_node);
node = rb_next(node);
@@ -311,8 +323,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- struct rb_root *root_in;
- struct rb_root *root_out;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
if (he->parent_he) {
root_in = &he->parent_he->hroot_in;
@@ -325,8 +337,8 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
root_out = &hists->entries;
}
- rb_erase(&he->rb_node_in, root_in);
- rb_erase(&he->rb_node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ rb_erase_cached(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -337,7 +349,7 @@ static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -353,7 +365,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
void hists__delete_entries(struct hists *hists)
{
- struct rb_node *next = rb_first(&hists->entries);
+ struct rb_node *next = rb_first_cached(&hists->entries);
struct hist_entry *n;
while (next) {
@@ -394,11 +406,8 @@ static int hist_entry__init(struct hist_entry *he,
* adding new entries. So we need to save a copy.
*/
he->branch_info = malloc(sizeof(*he->branch_info));
- if (he->branch_info == NULL) {
- map__zput(he->ms.map);
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->branch_info == NULL)
+ goto err;
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
@@ -417,31 +426,53 @@ static int hist_entry__init(struct hist_entry *he,
if (he->raw_data) {
he->raw_data = memdup(he->raw_data, he->raw_size);
+ if (he->raw_data == NULL)
+ goto err_infos;
+ }
- if (he->raw_data == NULL) {
- map__put(he->ms.map);
- if (he->branch_info) {
- map__put(he->branch_info->from.map);
- map__put(he->branch_info->to.map);
- free(he->branch_info);
- }
- if (he->mem_info) {
- map__put(he->mem_info->iaddr.map);
- map__put(he->mem_info->daddr.map);
- }
- free(he->stat_acc);
- return -ENOMEM;
- }
+ if (he->srcline) {
+ he->srcline = strdup(he->srcline);
+ if (he->srcline == NULL)
+ goto err_rawdata;
+ }
+
+ if (symbol_conf.res_sample) {
+ he->res_samples = calloc(sizeof(struct res_sample),
+ symbol_conf.res_sample);
+ if (!he->res_samples)
+ goto err_srcline;
}
+
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
- he->hroot_in = RB_ROOT;
- he->hroot_out = RB_ROOT;
+ he->hroot_in = RB_ROOT_CACHED;
+ he->hroot_out = RB_ROOT_CACHED;
if (!symbol_conf.report_hierarchy)
he->leaf = true;
return 0;
+
+err_srcline:
+ free(he->srcline);
+
+err_rawdata:
+ free(he->raw_data);
+
+err_infos:
+ if (he->branch_info) {
+ map__put(he->branch_info->from.map);
+ map__put(he->branch_info->to.map);
+ free(he->branch_info);
+ }
+ if (he->mem_info) {
+ map__put(he->mem_info->iaddr.map);
+ map__put(he->mem_info->daddr.map);
+ }
+err:
+ map__zput(he->ms.map);
+ free(he->stat_acc);
+ return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
@@ -513,8 +544,9 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
+ bool leftmost = true;
- p = &hists->entries_in->rb_node;
+ p = &hists->entries_in->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -557,8 +589,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(entry, sample_self);
@@ -570,7 +604,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, hists->entries_in);
+ rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
if (sample_self)
he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
@@ -579,6 +613,32 @@ out:
return he;
}
+static unsigned random_max(unsigned high)
+{
+ unsigned thresh = -high % high;
+ for (;;) {
+ unsigned r = random();
+ if (r >= thresh)
+ return r % high;
+ }
+}
+
+static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
+{
+ struct res_sample *r;
+ int j;
+
+ if (he->num_res < symbol_conf.res_sample) {
+ j = he->num_res++;
+ } else {
+ j = random_max(symbol_conf.res_sample);
+ }
+ r = &he->res_samples[j];
+ r->time = sample->time;
+ r->cpu = sample->cpu;
+ r->tid = sample->tid;
+}
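
random_max() is the standard rejection trick for a bias-free "random index below high": thresh is 2^32 mod high (written as -high % high in unsigned arithmetic), values below thresh are rejected, and the surviving range divides evenly by high, so r % high comes out uniform, assuming the generator spans the full 32-bit range, which random() only approximates. hists__res_sample() then keeps up to symbol_conf.res_sample representative samples per hist entry: slots fill in order and, once full, each new sample overwrites a uniformly chosen slot (a cheap random-replacement buffer, not a probability-k/n reservoir). A concrete value for the threshold computation:

unsigned high = 6;
unsigned thresh = -high % high;		/* (2^32 - 6) % 6 == 4 */
/*
 * r in 0..3 is rejected; the remaining 4294967292 values split into exactly
 * 715827882 per residue class, so r % 6 is uniform over 0..5.
 */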
+
static struct hist_entry*
__hists__add_entry(struct hists *hists,
struct addr_location *al,
@@ -601,7 +661,7 @@ __hists__add_entry(struct hists *hists,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
@@ -621,10 +681,13 @@ __hists__add_entry(struct hists *hists,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
+ .time = hist_time(sample->time),
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
hists->has_callchains = true;
+ if (he && symbol_conf.res_sample)
+ hists__res_sample(he, sample);
return he;
}
@@ -958,7 +1021,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.map = al->map,
.sym = al->sym,
},
- .srcline = al->srcline ? strdup(al->srcline) : NULL,
+ .srcline = (char *) al->srcline,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
@@ -1048,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
- if (err)
+ if (err) {
+ map__put(alm);
return err;
+ }
err = iter->ops->prepare_entry(iter, al);
if (err)
@@ -1148,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
mem_info__zput(he->mem_info);
}
+ zfree(&he->res_samples);
zfree(&he->stat_acc);
free_srcline(he->srcline);
if (he->srcfile && he->srcfile[0])
@@ -1279,16 +1345,17 @@ static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he,
struct hist_entry *parent_he,
struct perf_hpp_list *hpp_list)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
struct perf_hpp_fmt *fmt;
int64_t cmp;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1308,8 +1375,10 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
new = hist_entry__new(he, true);
@@ -1343,12 +1412,12 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
}
rb_link_node(&new->rb_node_in, parent, p);
- rb_insert_color(&new->rb_node_in, root);
+ rb_insert_color_cached(&new->rb_node_in, root, leftmost);
return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
struct perf_hpp_list_node *node;
@@ -1395,13 +1464,14 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
}
static int hists__collapse_insert_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ bool leftmost = true;
if (symbol_conf.report_hierarchy)
return hists__hierarchy_insert_entry(hists, root, he);
@@ -1432,19 +1502,21 @@ static int hists__collapse_insert_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
return 1;
}
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
pthread_mutex_lock(&hists->lock);
@@ -1467,7 +1539,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
int ret;
@@ -1479,7 +1551,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
root = hists__get_rotate_entries_in(hists);
- next = rb_first(root);
+ next = rb_first_cached(root);
while (next) {
if (session_done())
@@ -1487,7 +1559,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- rb_erase(&n->rb_node_in, root);
+ rb_erase_cached(&n->rb_node_in, root);
ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
if (ret < 0)
return -1;
@@ -1558,7 +1630,7 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
struct rb_node *node;
struct hist_entry *he;
- node = rb_first(&hists->entries);
+ node = rb_first_cached(&hists->entries);
hists->stats.total_period = 0;
hists->stats.total_non_filtered_period = 0;
@@ -1578,13 +1650,14 @@ static void hierarchy_recalc_total_periods(struct hists *hists)
}
}
-static void hierarchy_insert_output_entry(struct rb_root *root,
+static void hierarchy_insert_output_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1592,12 +1665,14 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
if (hist_entry__sort(he, iter) > 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
@@ -1608,16 +1683,16 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
static void hists__hierarchy_output_resort(struct hists *hists,
struct ui_progress *prog,
- struct rb_root *root_in,
- struct rb_root *root_out,
+ struct rb_root_cached *root_in,
+ struct rb_root_cached *root_out,
u64 min_callchain_hits,
bool use_callchain)
{
struct rb_node *node;
struct hist_entry *he;
- *root_out = RB_ROOT;
- node = rb_first(root_in);
+ *root_out = RB_ROOT_CACHED;
+ node = rb_first_cached(root_in);
while (node) {
he = rb_entry(node, struct hist_entry, rb_node_in);
@@ -1660,15 +1735,16 @@ static void hists__hierarchy_output_resort(struct hists *hists,
}
}
-static void __hists__insert_output_entry(struct rb_root *entries,
+static void __hists__insert_output_entry(struct rb_root_cached *entries,
struct hist_entry *he,
u64 min_callchain_hits,
bool use_callchain)
{
- struct rb_node **p = &entries->rb_node;
+ struct rb_node **p = &entries->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
if (use_callchain) {
if (callchain_param.mode == CHAIN_GRAPH_REL) {
@@ -1689,12 +1765,14 @@ static void __hists__insert_output_entry(struct rb_root *entries,
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, entries);
+ rb_insert_color_cached(&he->rb_node, entries, leftmost);
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
if (perf_hpp__is_dynamic_entry(fmt) &&
@@ -1704,9 +1782,10 @@ static void __hists__insert_output_entry(struct rb_root *entries,
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
- bool use_callchain, hists__resort_cb_t cb)
+ bool use_callchain, hists__resort_cb_t cb,
+ void *cb_arg)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *next;
struct hist_entry *n;
u64 callchain_total;
@@ -1736,14 +1815,14 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
else
root = hists->entries_in;
- next = rb_first(root);
- hists->entries = RB_ROOT;
+ next = rb_first_cached(root);
+ hists->entries = RB_ROOT_CACHED;
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
- if (cb && cb(n))
+ if (cb && cb(n, cb_arg))
continue;
__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
@@ -1757,7 +1836,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
}
}
-void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
@@ -1768,18 +1848,23 @@ void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *pro
use_callchain |= symbol_conf.show_branchflag_count;
- output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
+ output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
+}
+
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
- output_resort(hists, prog, symbol_conf.use_callchain, NULL);
+ output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb)
{
- output_resort(hists, prog, symbol_conf.use_callchain, cb);
+ output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
@@ -1798,7 +1883,7 @@ struct rb_node *rb_hierarchy_last(struct rb_node *node)
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
while (can_goto_child(he, HMD_NORMAL)) {
- node = rb_last(&he->hroot_out);
+ node = rb_last(&he->hroot_out.rb_root);
he = rb_entry(node, struct hist_entry, rb_node);
}
return node;
@@ -1809,7 +1894,7 @@ struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_di
struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
if (can_goto_child(he, hmd))
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
else
node = rb_next(node);
@@ -1847,7 +1932,7 @@ bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
if (he->leaf)
return false;
- node = rb_first(&he->hroot_out);
+ node = rb_first_cached(&he->hroot_out);
child = rb_entry(node, struct hist_entry, rb_node);
while (node && child->filtered) {
@@ -1965,7 +2050,7 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
if (filter(hists, h))
@@ -1975,13 +2060,15 @@ static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t fil
}
}
-static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root_cached *root,
+ struct hist_entry *he)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p = &root->rb_root.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
struct rb_node *nd;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
@@ -1989,22 +2076,24 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
if (hist_entry__sort(he, iter) > 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, root);
+ rb_insert_color_cached(&he->rb_node, root, leftmost);
if (he->leaf || he->filtered)
return;
- nd = rb_first(&he->hroot_out);
+ nd = rb_first_cached(&he->hroot_out);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &he->hroot_out);
+ rb_erase_cached(&h->rb_node, &he->hroot_out);
resort_filtered_entry(&new_root, h);
}
@@ -2015,14 +2104,14 @@ static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
- struct rb_root new_root = RB_ROOT;
+ struct rb_root_cached new_root = RB_ROOT_CACHED;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
int ret;
@@ -2066,12 +2155,12 @@ static void hists__filter_hierarchy(struct hists *hists, int type, const void *a
* resort output after applying a new filter since filter in a lower
* hierarchy can change periods in a upper hierarchy.
*/
- nd = rb_first(&hists->entries);
+ nd = rb_first_cached(&hists->entries);
while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd = rb_next(nd);
- rb_erase(&h->rb_node, &hists->entries);
+ rb_erase_cached(&h->rb_node, &hists->entries);
resort_filtered_entry(&new_root, h);
}
@@ -2140,18 +2229,19 @@ void hists__inc_nr_samples(struct hists *hists, bool filtered)
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
int64_t cmp;
+ bool leftmost = true;
if (hists__has(hists, need_collapse))
root = &hists->entries_collapsed;
else
root = hists->entries_in;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
parent = *p;
@@ -2164,8 +2254,10 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (cmp < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
@@ -2175,7 +2267,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
if (symbol_conf.cumulate_callchain)
memset(he->stat_acc, 0, sizeof(he->stat));
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
hists__inc_stats(hists, he);
he->dummy = true;
}
@@ -2184,15 +2276,16 @@ out:
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
- struct rb_root *root,
+ struct rb_root_cached *root,
struct hist_entry *pair)
{
struct rb_node **p;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
+ bool leftmost = true;
- p = &root->rb_node;
+ p = &root->rb_root.rb_node;
while (*p != NULL) {
int64_t cmp = 0;
@@ -2209,14 +2302,16 @@ static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
if (cmp < 0)
p = &parent->rb_left;
- else
+ else {
p = &parent->rb_right;
+ leftmost = false;
+ }
}
he = hist_entry__new(pair, true);
if (he) {
rb_link_node(&he->rb_node_in, parent, p);
- rb_insert_color(&he->rb_node_in, root);
+ rb_insert_color_cached(&he->rb_node_in, root, leftmost);
he->dummy = true;
he->hists = hists;
@@ -2233,9 +2328,9 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
struct rb_node *n;
if (hists__has(hists, need_collapse))
- n = hists->entries_collapsed.rb_node;
+ n = hists->entries_collapsed.rb_root.rb_node;
else
- n = hists->entries_in->rb_node;
+ n = hists->entries_in->rb_root.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
@@ -2252,10 +2347,10 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
return NULL;
}
-static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
+static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
struct hist_entry *he)
{
- struct rb_node *n = root->rb_node;
+ struct rb_node *n = root->rb_root.rb_node;
while (n) {
struct hist_entry *iter;
@@ -2280,13 +2375,13 @@ static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
return NULL;
}
-static void hists__match_hierarchy(struct rb_root *leader_root,
- struct rb_root *other_root)
+static void hists__match_hierarchy(struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *pair;
- for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_hierarchy_entry(other_root, pos);
@@ -2302,7 +2397,7 @@ static void hists__match_hierarchy(struct rb_root *leader_root,
*/
void hists__match(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2317,7 +2412,7 @@ void hists__match(struct hists *leader, struct hists *other)
else
root = leader->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
pair = hists__find_entry(other, pos);
@@ -2328,13 +2423,13 @@ void hists__match(struct hists *leader, struct hists *other)
static int hists__link_hierarchy(struct hists *leader_hists,
struct hist_entry *parent,
- struct rb_root *leader_root,
- struct rb_root *other_root)
+ struct rb_root_cached *leader_root,
+ struct rb_root_cached *other_root)
{
struct rb_node *nd;
struct hist_entry *pos, *leader;
- for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (hist_entry__has_pairs(pos)) {
@@ -2377,7 +2472,7 @@ static int hists__link_hierarchy(struct hists *leader_hists,
*/
int hists__link(struct hists *leader, struct hists *other)
{
- struct rb_root *root;
+ struct rb_root_cached *root;
struct rb_node *nd;
struct hist_entry *pos, *pair;
@@ -2393,7 +2488,7 @@ int hists__link(struct hists *leader, struct hists *other)
else
root = other->entries_in;
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct hist_entry, rb_node_in);
if (!hist_entry__has_pairs(pos)) {
@@ -2566,10 +2661,10 @@ int perf_hist_config(const char *var, const char *value)
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
+ hists->entries_collapsed = RB_ROOT_CACHED;
+ hists->entries = RB_ROOT_CACHED;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
@@ -2577,14 +2672,14 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
return 0;
}
-static void hists__delete_remaining_entries(struct rb_root *root)
+static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
struct rb_node *node;
struct hist_entry *he;
- while (!RB_EMPTY_ROOT(root)) {
- node = rb_first(root);
- rb_erase(node, root);
+ while (!RB_EMPTY_ROOT(&root->rb_root)) {
+ node = rb_first_cached(root);
+ rb_erase_cached(node, root);
he = rb_entry(node, struct hist_entry, rb_node_in);
hist_entry__delete(he);
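The hist.c changes above all follow one pattern: a plain rb_root becomes an rb_root_cached, insertion tracks whether the new node stays leftmost, and rb_first_cached()/rb_erase_cached() replace rb_first()/rb_erase() so the leftmost lookup is O(1). A minimal sketch of that insertion pattern, assuming only tools/include's <linux/rbtree.h> and <linux/types.h> and a hypothetical struct item (not perf code):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rb_node;
	u64 key;
};

static void item_insert(struct rb_root_cached *root, struct item *new)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		struct item *pos = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (new->key < pos->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* no longer the smallest key */
		}
	}

	rb_link_node(&new->rb_node, parent, p);
	/* updates root->rb_leftmost so rb_first_cached() stays O(1) */
	rb_insert_color_cached(&new->rb_node, root, leftmost);
}

Roots are then initialized with RB_ROOT_CACHED instead of RB_ROOT, exactly as __hists__init() does above.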
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 664b5eda8d51..76ff6c6d03b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -2,9 +2,9 @@
#ifndef __PERF_HIST_H
#define __PERF_HIST_H
+#include <linux/rbtree.h>
#include <linux/types.h>
#include <pthread.h>
-#include "callchain.h"
#include "evsel.h"
#include "header.h"
#include "color.h"
@@ -13,6 +13,9 @@
struct hist_entry;
struct hist_entry_ops;
struct addr_location;
+struct map_symbol;
+struct mem_info;
+struct branch_info;
struct symbol;
enum hist_filter {
@@ -28,6 +31,7 @@ enum hist_filter {
enum hist_column {
HISTC_SYMBOL,
+ HISTC_TIME,
HISTC_DSO,
HISTC_THREAD,
HISTC_COMM,
@@ -70,10 +74,10 @@ struct thread;
struct dso;
struct hists {
- struct rb_root entries_in_array[2];
- struct rb_root *entries_in;
- struct rb_root entries;
- struct rb_root entries_collapsed;
+ struct rb_root_cached entries_in_array[2];
+ struct rb_root_cached *entries_in;
+ struct rb_root_cached entries;
+ struct rb_root_cached entries_collapsed;
u64 nr_entries;
u64 nr_non_filtered_entries;
u64 callchain_period;
@@ -160,8 +164,10 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
void hist_entry__delete(struct hist_entry *he);
-typedef int (*hists__resort_cb_t)(struct hist_entry *he);
+typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
+void perf_evsel__output_resort_cb(struct perf_evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg);
void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
@@ -230,7 +236,7 @@ static __pure inline bool hists__has_callchains(struct hists *hists)
int hists__init(void);
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
-struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
+struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists);
struct perf_hpp {
char *buf;
@@ -427,9 +433,18 @@ struct hist_browser_timer {
};
struct annotation_options;
+struct res_sample;
+
+enum rstype {
+ A_NORMAL,
+ A_ASM,
+ A_SOURCE
+};
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
+void attr_to_script(char *buf, struct perf_event_attr *attr);
+
int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
struct hist_browser_timer *hbt,
struct annotation_options *annotation_opts);
@@ -444,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
struct perf_env *env,
bool warn_lost_event,
struct annotation_options *annotation_options);
-int script_browse(const char *script_opt);
+
+int script_browse(const char *script_opt, struct perf_evsel *evsel);
+
+void run_script(char *cmd);
+int res_sample_browse(struct res_sample *res_samples, int num_res,
+ struct perf_evsel *evsel, enum rstype rstype);
+void res_sample_init(void);
#else
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -473,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
return 0;
}
-static inline int script_browse(const char *script_opt __maybe_unused)
+static inline int script_browse(const char *script_opt __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused)
{
return 0;
}
+static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
+ int num_res __maybe_unused,
+ struct perf_evsel *evsel __maybe_unused,
+ enum rstype rstype __maybe_unused)
+{
+ return 0;
+}
+
+static inline void res_sample_init(void) {}
+
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ee6ca65f81f4..e32dbffebb2f 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel-bts.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <endian.h>
@@ -27,6 +18,8 @@
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "util.h"
#include "thread.h"
@@ -142,7 +135,7 @@ static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
- sample->tid, 0, "Lost trace data");
+ sample->tid, 0, "Lost trace data", sample->time);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
@@ -326,35 +319,19 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
struct machine *machine = btsq->bts->machine;
struct thread *thread;
- struct addr_location al;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
- int x86_64;
- uint8_t cpumode;
+ bool x86_64;
int err = -1;
- if (machine__kernel_ip(machine, ip))
- cpumode = PERF_RECORD_MISC_KERNEL;
- else
- cpumode = PERF_RECORD_MISC_USER;
-
thread = machine__find_thread(machine, -1, btsq->tid);
if (!thread)
return -1;
- if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
- goto out_put;
-
- len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
- INTEL_PT_INSN_BUF_SZ);
+ len = thread__memcpy(thread, machine, buf, ip, INTEL_PT_INSN_BUF_SZ, &x86_64);
if (len <= 0)
goto out_put;
- /* Load maps to ensure dso->is_64_bit has been updated */
- map__load(al.map);
-
- x86_64 = al.map->dso->is_64_bit;
-
if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
goto out_put;
@@ -372,7 +349,7 @@ static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
- "Failed to get instruction");
+ "Failed to get instruction", 0);
err = perf_session__deliver_synth_event(bts->session, &event, NULL);
if (err)
diff --git a/tools/perf/util/intel-bts.h b/tools/perf/util/intel-bts.h
index ca65e21b3e83..53d5aa02766a 100644
--- a/tools/perf/util/intel-bts.h
+++ b/tools/perf/util/intel-bts.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel-bts.h: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__PERF_INTEL_BTS_H__
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 1b704fbea9de..23bf788f84b9 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -1,4 +1,4 @@
-libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+perf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk
inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
diff --git a/tools/perf/util/intel-pt-decoder/inat.c b/tools/perf/util/intel-pt-decoder/inat.c
index 906d94aa0a24..446c0413a27c 100644
--- a/tools/perf/util/intel-pt-decoder/inat.c
+++ b/tools/perf/util/intel-pt-decoder/inat.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 instruction attribute tables
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include "insn.h"
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
index 52dc8d911173..877827b7c2c3 100644
--- a/tools/perf/util/intel-pt-decoder/inat.h
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INAT_H
#define _ASM_X86_INAT_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include "inat_types.h"
diff --git a/tools/perf/util/intel-pt-decoder/inat_types.h b/tools/perf/util/intel-pt-decoder/inat_types.h
index cb3c20ce39cf..b047efa9ddc2 100644
--- a/tools/perf/util/intel-pt-decoder/inat_types.h
+++ b/tools/perf/util/intel-pt-decoder/inat_types.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INAT_TYPES_H
#define _ASM_X86_INAT_TYPES_H
/*
* x86 instruction attributes
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
/* Instruction attributes */
diff --git a/tools/perf/util/intel-pt-decoder/insn.c b/tools/perf/util/intel-pt-decoder/insn.c
index ca983e2bea8b..82783bf43b74 100644
--- a/tools/perf/util/intel-pt-decoder/insn.c
+++ b/tools/perf/util/intel-pt-decoder/insn.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* x86 instruction analysis
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2002, 2004, 2009
*/
diff --git a/tools/perf/util/intel-pt-decoder/insn.h b/tools/perf/util/intel-pt-decoder/insn.h
index 2669c9f748e4..37a4c390750b 100644
--- a/tools/perf/util/intel-pt-decoder/insn.h
+++ b/tools/perf/util/intel-pt-decoder/insn.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_INSN_H
#define _ASM_X86_INSN_H
/*
* x86 instruction analysis
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2009
*/
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 4503f3ca45ab..9d189e90fbdc 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef _GNU_SOURCE
@@ -26,6 +17,7 @@
#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -57,6 +49,7 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_NO_IP,
INTEL_PT_STATE_ERR_RESYNC,
INTEL_PT_STATE_IN_SYNC,
+ INTEL_PT_STATE_TNT_CONT,
INTEL_PT_STATE_TNT,
INTEL_PT_STATE_TIP,
INTEL_PT_STATE_TIP_PGD,
@@ -71,8 +64,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
case INTEL_PT_STATE_NO_IP:
case INTEL_PT_STATE_ERR_RESYNC:
case INTEL_PT_STATE_IN_SYNC:
- case INTEL_PT_STATE_TNT:
+ case INTEL_PT_STATE_TNT_CONT:
return true;
+ case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TIP:
case INTEL_PT_STATE_TIP_PGD:
case INTEL_PT_STATE_FUP:
@@ -250,19 +244,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
decoder->tsc_ctc_ratio_d;
-
- /*
- * Allow for timestamps appearing to backwards because a TSC
- * packet has slipped past a MTC packet, so allow 2 MTC ticks
- * or ...
- */
- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
- decoder->tsc_ctc_ratio_n,
- decoder->tsc_ctc_ratio_d);
}
- /* ... or 0x100 paranoia */
- if (decoder->tsc_slip < 0x100)
- decoder->tsc_slip = 0x100;
+
+ /*
+ * A TSC packet can slip past MTC packets so that the timestamp appears
+ * to go backwards. One estimate is that it can be up to about 40 CPU
+ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+ * slippage of an order of magnitude more to be on the safe side.
+ */
+ decoder->tsc_slip = 0x10000;
intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
@@ -867,7 +857,7 @@ static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
ret = intel_pt_get_packet(decoder->buf, decoder->len,
&decoder->packet);
- if (ret == INTEL_PT_NEED_MORE_BYTES &&
+ if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
ret = intel_pt_get_split_packet(decoder);
if (ret < 0)
@@ -891,16 +881,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
if (decoder->continuous_period) {
- if (masked_timestamp != decoder->last_masked_timestamp)
+ if (masked_timestamp > decoder->last_masked_timestamp)
return 1;
} else {
timestamp += 1;
masked_timestamp = timestamp & decoder->period_mask;
- if (masked_timestamp != decoder->last_masked_timestamp) {
+ if (masked_timestamp > decoder->last_masked_timestamp) {
decoder->last_masked_timestamp = masked_timestamp;
decoder->continuous_period = true;
}
}
+
+ if (masked_timestamp < decoder->last_masked_timestamp)
+ return decoder->period_ticks;
+
return decoder->period_ticks - (timestamp - masked_timestamp);
}
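The hunk above also changes the `!=` checks to `>` and adds a fallback so that a masked timestamp that appears to move backwards is not mistaken for a new period. For intuition only, a tiny standalone sketch of the masked-timestamp arithmetic, under the assumption (not part of this diff) that period_ticks is a power of two and period_mask clears its low bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period_ticks = 0x100;		/* assumed power of two */
	uint64_t period_mask  = ~(period_ticks - 1);
	uint64_t timestamp    = 0x1234;
	uint64_t masked       = timestamp & period_mask;	/* 0x1200 */

	/* instructions left until the next period boundary: 0x100 - 0x34 = 0xcc */
	printf("remaining: %#llx\n",
	       (unsigned long long)(period_ticks - (timestamp - masked)));
	return 0;
}

A new period is due only when the masked value advances past the last one recorded; comparing with `>` instead of `!=` keeps a transient backwards step (for example after a TSC packet slips past an MTC packet) from firing an extra period.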
@@ -929,7 +923,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
case INTEL_PT_PERIOD_TICKS:
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
- decoder->last_masked_timestamp = masked_timestamp;
+ if (masked_timestamp > decoder->last_masked_timestamp)
+ decoder->last_masked_timestamp = masked_timestamp;
+ else
+ decoder->last_masked_timestamp += decoder->period_ticks;
break;
case INTEL_PT_PERIOD_NONE:
case INTEL_PT_PERIOD_MTC:
@@ -1257,7 +1254,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
return -ENOENT;
}
decoder->tnt.count -= 1;
- if (!decoder->tnt.count)
+ if (decoder->tnt.count)
+ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+ else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->tnt.payload <<= 1;
decoder->state.from_ip = decoder->ip;
@@ -1288,7 +1287,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
decoder->tnt.count -= 1;
- if (!decoder->tnt.count)
+ if (decoder->tnt.count)
+ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+ else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
if (decoder->tnt.payload & BIT63) {
decoder->tnt.payload <<= 1;
@@ -1308,8 +1309,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
return 0;
}
decoder->ip += intel_pt_insn.length;
- if (!decoder->tnt.count)
+ if (!decoder->tnt.count) {
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
return -EAGAIN;
+ }
decoder->tnt.payload <<= 1;
continue;
}
@@ -1394,7 +1398,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -2369,6 +2372,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
err = intel_pt_walk_trace(decoder);
break;
case INTEL_PT_STATE_TNT:
+ case INTEL_PT_STATE_TNT_CONT:
err = intel_pt_walk_tnt(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
@@ -2575,6 +2579,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
+
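adj_for_padding() compares the last MAX_PADDING bytes of @buf_a with the bytes of @buf_b just before the computed overlap start and stops at the first mismatch, which is where @buf_a's padding began and therefore where @buf_b's non-overlapped data really starts. A toy, self-contained illustration (hypothetical byte values; the helper is copied verbatim from the hunk above):

#include <stdio.h>
#include <stddef.h>

#define MAX_PADDING 7

static unsigned char *adj_for_padding(unsigned char *buf_b,
				      unsigned char *buf_a, size_t len_a)
{
	unsigned char *p = buf_b - MAX_PADDING;
	unsigned char *q = buf_a + len_a - MAX_PADDING;
	int i;

	for (i = MAX_PADDING; i; i--, p++, q++) {
		if (*p != *q)
			break;
	}
	return p;
}

int main(void)
{
	/* buf_a: eight trace bytes followed by two bytes of padding */
	unsigned char buf_a[] = { 1, 2, 3, 4, 5, 6, 7, 8, 0, 0 };
	/* buf_b: repeats the same eight trace bytes, then new data */
	unsigned char buf_b[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	/* naive start assumes all ten bytes of buf_a overlap buf_b */
	unsigned char *start = buf_b + sizeof(buf_a);

	start = adj_for_padding(start, buf_a, sizeof(buf_a));
	printf("new data starts at offset %td\n", start - buf_b);	/* prints 8 */
	return 0;
}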
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2625,8 +2657,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2689,7 +2724,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index ed088d4726ba..1e8cfdc7bfab 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel_pt_decoder.h: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__INTEL_PT_DECODER_H__
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 1c0e289f01e6..598f56be9f17 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_insn_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <stdio.h>
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
index 37ec5627ae9b..95a1eb0141ff 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel_pt_insn_decoder.h: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__INTEL_PT_INSN_DECODER_H__
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index 5e64da270f97..09feb5b07d32 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_log.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <stdio.h>
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index cc084937f701..388661f89c44 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel_pt_log.h: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__INTEL_PT_LOG_H__
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index d426761a549d..605fce537d80 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt_pkt_decoder.c: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <stdio.h>
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
index 73ddc3a88d07..a7aefaa08588 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel_pt_pkt_decoder.h: Intel Processor Trace support
* Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__INTEL_PT_PKT_DECODER_H__
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 2e72373ec6df..f7dd4657535d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_pt.c: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <inttypes.h>
@@ -1411,7 +1402,7 @@ static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
union perf_event event;
char msg[MAX_AUXTRACE_ERROR_MSG];
@@ -1420,7 +1411,7 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(pt->session, &event, NULL);
if (err)
@@ -1430,6 +1421,18 @@ static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
return err;
}
+static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
+ const struct intel_pt_state *state)
+{
+ struct intel_pt *pt = ptq->pt;
+ u64 tm = ptq->timestamp;
+
+ tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
+
+ return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
+ ptq->tid, state->from_ip, tm);
+}
+
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
struct auxtrace_queue *queue;
@@ -1676,10 +1679,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
- err = intel_pt_synth_error(pt, state->err,
- ptq->cpu, ptq->pid,
- ptq->tid,
- state->from_ip);
+ err = intel_ptq_synth_error(ptq, state);
if (err)
return err;
}
@@ -1804,7 +1804,7 @@ static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0, sample->time);
}
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
@@ -2522,6 +2522,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
}
pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+ if (pt->timeless_decoding && !pt->tc.time_mult)
+ pt->tc.time_mult = 1;
pt->have_tsc = intel_pt_have_tsc(pt);
pt->sampling_mode = false;
pt->est_tsc = !pt->timeless_decoding;
@@ -2577,7 +2579,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
} else {
itrace_synth_opts__set_default(&pt->synth_opts,
session->itrace_synth_opts->default_no_sample);
- if (use_browser != -1) {
+ if (!session->itrace_synth_opts->default_no_sample &&
+ !session->itrace_synth_opts->inject) {
pt->synth_opts.branches = false;
pt->synth_opts.callchain = true;
}
diff --git a/tools/perf/util/intel-pt.h b/tools/perf/util/intel-pt.h
index e13b14e5a37b..c7d6068e3a6b 100644
--- a/tools/perf/util/intel-pt.h
+++ b/tools/perf/util/intel-pt.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* intel_pt.h: Intel Processor Trace support
* Copyright (c) 2013-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef INCLUDE__PERF_INTEL_PT_H__
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index 89715b64a315..84e5304e151a 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on intlist.c by:
* (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Licensed under the GPLv2.
*/
#include <errno.h>
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 85bab8735fa9..5c19ee001299 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -45,7 +45,7 @@ static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
/* For intlist iteration */
static inline struct int_node *intlist__first(struct intlist *ilist)
{
- struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&ilist->rblist.entries);
return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
}
static inline struct int_node *intlist__next(struct int_node *in)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index bf249552a9b0..eda28d3570bc 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -2,6 +2,7 @@
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <errno.h>
+#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
index c6b9b67f43bf..f2c3823cc81a 100644
--- a/tools/perf/util/jitdump.h
+++ b/tools/perf/util/jitdump.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* jitdump.h: jitted code info encapsulation file format
*
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
index 7b1f06567521..1403dec189b4 100644
--- a/tools/perf/util/kvm-stat.h
+++ b/tools/perf/util/kvm-stat.h
@@ -3,12 +3,13 @@
#define __PERF_KVM_STAT_H
#include "../perf.h"
-#include "evsel.h"
-#include "evlist.h"
-#include "session.h"
#include "tool.h"
#include "stat.h"
+struct perf_evsel;
+struct perf_evlist;
+struct perf_session;
+
struct event_key {
#define INVALID_KEY (~0ULL)
u64 key;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 143f7057d581..dc7aafe45a2b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -10,6 +10,7 @@
#include "hist.h"
#include "machine.h"
#include "map.h"
+#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
@@ -21,6 +22,7 @@
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
+#include "bpf-event.h"
#include "sane_ctype.h"
#include <symbol/kallsyms.h>
@@ -41,7 +43,7 @@ static void machine__threads_init(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
- threads->entries = RB_ROOT;
+ threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
INIT_LIST_HEAD(&threads->dead);
@@ -179,7 +181,7 @@ void machine__delete_threads(struct machine *machine)
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads *threads = &machine->threads[i];
down_write(&threads->lock);
- nd = rb_first(&threads->entries);
+ nd = rb_first_cached(&threads->entries);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
@@ -222,7 +224,7 @@ void machine__delete(struct machine *machine)
void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
- machines->guests = RB_ROOT;
+ machines->guests = RB_ROOT_CACHED;
}
void machines__exit(struct machines *machines)
@@ -234,9 +236,10 @@ void machines__exit(struct machines *machines)
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *pos, *machine = malloc(sizeof(*machine));
+ bool leftmost = true;
if (machine == NULL)
return NULL;
@@ -251,12 +254,14 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
pos = rb_entry(parent, struct machine, rb_node);
if (pid < pos->pid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&machine->rb_node, parent, p);
- rb_insert_color(&machine->rb_node, &machines->guests);
+ rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
return machine;
}
@@ -267,7 +272,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
machines->host.comm_exec = comm_exec;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
machine->comm_exec = comm_exec;
@@ -276,7 +281,7 @@ void machines__set_comm_exec(struct machines *machines, bool comm_exec)
struct machine *machines__find(struct machines *machines, pid_t pid)
{
- struct rb_node **p = &machines->guests.rb_node;
+ struct rb_node **p = &machines->guests.rb_root.rb_node;
struct rb_node *parent = NULL;
struct machine *machine;
struct machine *default_machine = NULL;
@@ -339,7 +344,7 @@ void machines__process_guests(struct machines *machines,
{
struct rb_node *nd;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
process(pos, data);
}
@@ -352,7 +357,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
machines->host.id_hdr_size = id_hdr_size;
- for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&machines->guests); node;
+ node = rb_next(node)) {
machine = rb_entry(node, struct machine, rb_node);
machine->id_hdr_size = id_hdr_size;
}
@@ -465,9 +471,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
- struct rb_node **p = &threads->entries.rb_node;
+ struct rb_node **p = &threads->entries.rb_root.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
+ bool leftmost = true;
th = threads__get_last_match(threads, machine, pid, tid);
if (th)
@@ -485,8 +492,10 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
if (tid < th->tid)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
if (!create)
@@ -495,7 +504,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads->entries);
+ rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
* We have to initialize map_groups separately
@@ -506,7 +515,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screwed the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@@ -681,6 +690,59 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
+static int machine__process_ksymbol_register(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct symbol *sym;
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (!map) {
+ map = dso__new_map(event->ksymbol_event.name);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = event->ksymbol_event.addr;
+ map->pgoff = map->start;
+ map->end = map->start + event->ksymbol_event.len;
+ map_groups__insert(&machine->kmaps, map);
+ }
+
+ sym = symbol__new(event->ksymbol_event.addr, event->ksymbol_event.len,
+ 0, 0, event->ksymbol_event.name);
+ if (!sym)
+ return -ENOMEM;
+ dso__insert_symbol(map->dso, sym);
+ return 0;
+}
+
+static int machine__process_ksymbol_unregister(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused)
+{
+ struct map *map;
+
+ map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
+ if (map)
+ map_groups__remove(&machine->kmaps, map);
+
+ return 0;
+}
+
+int machine__process_ksymbol(struct machine *machine __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ if (dump_trace)
+ perf_event__fprintf_ksymbol(event, stdout);
+
+ if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
+ return machine__process_ksymbol_unregister(machine, event,
+ sample);
+ return machine__process_ksymbol_register(machine, event, sample);
+}
+
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
const char *dup_filename;
@@ -744,7 +806,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
struct rb_node *nd;
size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += __dsos__fprintf(&pos->dsos.head, fp);
}
@@ -764,7 +826,7 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
struct rb_node *nd;
size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
}
@@ -804,7 +866,8 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
ret = fprintf(fp, "Threads: %u\n", threads->nr);
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@@ -861,7 +924,8 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
* symbol_name if it's not that important.
*/
static int machine__get_running_kernel_start(struct machine *machine,
- const char **symbol_name, u64 *start)
+ const char **symbol_name,
+ u64 *start, u64 *end)
{
char filename[PATH_MAX];
int i, err = -1;
@@ -886,6 +950,11 @@ static int machine__get_running_kernel_start(struct machine *machine,
*symbol_name = name;
*start = addr;
+
+ err = kallsyms__get_function_start(filename, "_etext", &addr);
+ if (!err)
+ *end = addr;
+
return 0;
}
@@ -1107,7 +1176,7 @@ failure:
void machines__destroy_kernel_maps(struct machines *machines)
{
- struct rb_node *next = rb_first(&machines->guests);
+ struct rb_node *next = rb_first_cached(&machines->guests);
machine__destroy_kernel_maps(&machines->host);
@@ -1115,7 +1184,7 @@ void machines__destroy_kernel_maps(struct machines *machines)
struct machine *pos = rb_entry(next, struct machine, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, &machines->guests);
+ rb_erase_cached(&pos->rb_node, &machines->guests);
machine__delete(pos);
}
}
@@ -1171,8 +1240,9 @@ static char *get_kernel_version(const char *root_dir)
if (!file)
return NULL;
- version[0] = '\0';
tmp = fgets(version, sizeof(version), file);
+ if (!tmp)
+ *version = '\0';
fclose(file);
name = strstr(version, prefix);
@@ -1358,12 +1428,26 @@ static void machine__set_kernel_mmap(struct machine *machine,
machine->vmlinux_map->end = ~0ULL;
}
+static void machine__update_kernel_mmap(struct machine *machine,
+ u64 start, u64 end)
+{
+ struct map *map = machine__kernel_map(machine);
+
+ map__get(map);
+ map_groups__remove(&machine->kmaps, map);
+
+ machine__set_kernel_mmap(machine, start, end);
+
+ map_groups__insert(&machine->kmaps, map);
+ map__put(map);
+}
+
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name = NULL;
struct map *map;
- u64 addr = 0;
+ u64 start = 0, end = ~0ULL;
int ret;
if (kernel == NULL)
@@ -1382,34 +1466,31 @@ int machine__create_kernel_maps(struct machine *machine)
"continuing anyway...\n", machine->pid);
}
- if (!machine__get_running_kernel_start(machine, &name, &addr)) {
+ if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
if (name &&
- map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
+ map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
machine__destroy_kernel_maps(machine);
ret = -1;
goto out_put;
}
- /* we have a real start address now, so re-order the kmaps */
- map = machine__kernel_map(machine);
-
- map__get(map);
- map_groups__remove(&machine->kmaps, map);
-
- /* assume it's the last in the kmaps */
- machine__set_kernel_mmap(machine, addr, ~0ULL);
-
- map_groups__insert(&machine->kmaps, map);
- map__put(map);
+ /*
+ * We have a real start address now, so re-order the kmaps;
+ * assume it's the last in the kmaps.
+ */
+ machine__update_kernel_mmap(machine, start, end);
}
if (machine__create_extra_kernel_maps(machine, kernel))
pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
- /* update end address of the kernel map using adjacent module address */
- map = map__next(machine__kernel_map(machine));
- if (map)
- machine__set_kernel_mmap(machine, addr, map->start);
+ if (end == ~0ULL) {
+ /* update end address of the kernel map using adjacent module address */
+ map = map__next(machine__kernel_map(machine));
+ if (map)
+ machine__set_kernel_mmap(machine, start, map->start);
+ }
+
out_put:
dso__put(kernel);
return ret;
@@ -1536,7 +1617,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
if (strstr(kernel->long_name, "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false);
- machine__set_kernel_mmap(machine, event->mmap.start,
+ machine__update_kernel_mmap(machine, event->mmap.start,
event->mmap.start + event->mmap.len);
/*
@@ -1680,7 +1761,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread *th,
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
down_write(&threads->lock);
- rb_erase_init(&th->rb_node, &threads->entries);
+ rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
--threads->nr;
/*
@@ -1812,6 +1893,10 @@ int machine__process_event(struct machine *machine, union perf_event *event,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
ret = machine__process_switch_event(machine, event); break;
+ case PERF_RECORD_KSYMBOL:
+ ret = machine__process_ksymbol(machine, event, sample); break;
+ case PERF_RECORD_BPF_EVENT:
+ ret = machine__process_bpf_event(machine, event, sample); break;
default:
ret = -1;
break;
@@ -2453,7 +2538,8 @@ int machine__for_each_thread(struct machine *machine,
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
threads = &machine->threads[i];
- for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&threads->entries); nd;
+ nd = rb_next(nd)) {
thread = rb_entry(nd, struct thread, rb_node);
rc = fn(thread, priv);
if (rc != 0)
@@ -2480,7 +2566,7 @@ int machines__for_each_thread(struct machines *machines,
if (rc != 0)
return rc;
- for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *machine = rb_entry(nd, struct machine, rb_node);
rc = machine__for_each_thread(machine, fn, priv);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index a5d1da60f751..f70ab98a7bde 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,7 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
-#include "map.h"
+#include "map_groups.h"
#include "dso.h"
#include "event.h"
#include "rwsem.h"
@@ -29,11 +29,11 @@ struct vdso_info;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads {
- struct rb_root entries;
- struct rw_semaphore lock;
- unsigned int nr;
- struct list_head dead;
- struct thread *last_match;
+ struct rb_root_cached entries;
+ struct rw_semaphore lock;
+ unsigned int nr;
+ struct list_head dead;
+ struct thread *last_match;
};
struct machine {
@@ -130,6 +130,9 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
struct perf_sample *sample);
int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
+int machine__process_ksymbol(struct machine *machine,
+ union perf_event *event,
+ struct perf_sample *sample);
int machine__process_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
@@ -137,7 +140,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
- struct rb_root guests;
+ struct rb_root_cached guests;
};
void machines__init(struct machines *machines);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6751301a755c..ee71efb9db62 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -261,6 +261,22 @@ bool __map__is_extra_kernel_map(const struct map *map)
return kmap && kmap->name[0];
}
+bool __map__is_bpf_prog(const struct map *map)
+{
+ const char *name;
+
+ if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return true;
+
+ /*
+ * If PERF_RECORD_BPF_EVENT is not included, the dso will not have a
+ * binary_type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
+ * guess the type based on the name.
+ */
+ name = map->dso->short_name;
+ return name && (strstr(name, "bpf_prog_") == name);
+}
+
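The fallback in __map__is_bpf_prog() relies on `strstr(name, "bpf_prog_") == name` as a prefix test: strstr() returns its haystack argument only when the match starts at the first byte. A trivial standalone sketch of that idiom (hypothetical helper, not perf code):

#include <stdbool.h>
#include <string.h>

static bool has_prefix(const char *name, const char *prefix)
{
	/* strstr() returns 'name' itself only if the match begins at byte 0 */
	return name && strstr(name, prefix) == name;
	/* e.g. has_prefix("bpf_prog_1234", "bpf_prog_") is true */
}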
bool map__has_symbols(const struct map *map)
{
return dso__has_symbols(map->dso);
@@ -286,8 +302,8 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_first(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->start = sym->start;
@@ -296,8 +312,8 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
- struct rb_root *symbols = &map->dso->symbols;
- struct rb_node *nd = rb_last(symbols);
+ struct rb_root_cached *symbols = &map->dso->symbols;
+ struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
map->end = sym->end;
@@ -557,6 +573,12 @@ void map_groups__init(struct map_groups *mg, struct machine *machine)
refcount_set(&mg->refcnt, 1);
}
+void map_groups__insert(struct map_groups *mg, struct map *map)
+{
+ maps__insert(&mg->maps, map);
+ map->groups = mg;
+}
+
static void __maps__purge(struct maps *maps)
{
struct rb_root *root = &maps->entries;
@@ -571,10 +593,25 @@ static void __maps__purge(struct maps *maps)
}
}
+static void __maps__purge_names(struct maps *maps)
+{
+ struct rb_root *root = &maps->names;
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct map *pos = rb_entry(next, struct map, rb_node_name);
+
+ next = rb_next(&pos->rb_node_name);
+ rb_erase_init(&pos->rb_node_name, root);
+ map__put(pos);
+ }
+}
+
static void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
+ __maps__purge_names(maps);
up_write(&maps->lock);
}
@@ -889,10 +926,8 @@ static void __maps__insert_name(struct maps *maps, struct map *map)
rc = strcmp(m->dso->short_name, map->dso->short_name);
if (rc < 0)
p = &(*p)->rb_left;
- else if (rc > 0)
- p = &(*p)->rb_right;
else
- return;
+ p = &(*p)->rb_right;
}
rb_link_node(&map->rb_node_name, parent, p);
rb_insert_color(&map->rb_node_name, &maps->names);
@@ -911,6 +946,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
{
rb_erase_init(&map->rb_node, &maps->entries);
map__put(map);
+
+ rb_erase_init(&map->rb_node_name, &maps->names);
+ map__put(map);
}
void maps__remove(struct maps *maps, struct map *map)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 09282aa45c80..dc93787c74f0 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -6,12 +6,10 @@
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rbtree.h>
-#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/types.h>
-#include "rwsem.h"
struct dso;
struct ip_callchain;
@@ -48,38 +46,7 @@ struct map {
refcount_t refcnt;
};
-#define KMAP_NAME_LEN 256
-
-struct kmap {
- struct ref_reloc_sym *ref_reloc_sym;
- struct map_groups *kmaps;
- char name[KMAP_NAME_LEN];
-};
-
-struct maps {
- struct rb_root entries;
- struct rb_root names;
- struct rw_semaphore lock;
-};
-
-struct map_groups {
- struct maps maps;
- struct machine *machine;
- refcount_t refcnt;
-};
-
-struct map_groups *map_groups__new(struct machine *machine);
-void map_groups__delete(struct map_groups *mg);
-bool map_groups__empty(struct map_groups *mg);
-
-static inline struct map_groups *map_groups__get(struct map_groups *mg)
-{
- if (mg)
- refcount_inc(&mg->refcnt);
- return mg;
-}
-
-void map_groups__put(struct map_groups *mg);
+struct kmap;
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
@@ -174,18 +141,7 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
-struct srccode_state {
- char *srcfile;
- unsigned line;
-};
-
-static inline void srccode_state_init(struct srccode_state *state)
-{
- state->srcfile = NULL;
- state->line = 0;
-}
-
-void srccode_state_free(struct srccode_state *state);
+struct srccode_state;
int map__fprintf_srccode(struct map *map, u64 addr,
FILE *fp, struct srccode_state *state);
@@ -198,67 +154,17 @@ void map__fixup_end(struct map *map);
void map__reloc_vmlinux(struct map *map);
-void maps__insert(struct maps *maps, struct map *map);
-void maps__remove(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-struct map *maps__first(struct maps *maps);
-struct map *map__next(struct map *map);
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
- struct map **mapp);
-void map_groups__init(struct map_groups *mg, struct machine *machine);
-void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct thread *thread,
- struct map_groups *parent);
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
-
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
u64 addr);
-static inline void map_groups__insert(struct map_groups *mg, struct map *map)
-{
- maps__insert(&mg->maps, map);
- map->groups = mg;
-}
-
-static inline void map_groups__remove(struct map_groups *mg, struct map *map)
-{
- maps__remove(&mg->maps, map);
-}
-
-static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
-{
- return maps__find(&mg->maps, addr);
-}
-
-struct map *map_groups__first(struct map_groups *mg);
-
-static inline struct map *map_groups__next(struct map *map)
-{
- return map__next(map);
-}
-
-struct symbol *map_groups__find_symbol(struct map_groups *mg,
- u64 addr, struct map **mapp);
-
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
- const char *name,
- struct map **mapp);
-
-struct addr_map_symbol;
-
-int map_groups__find_ams(struct addr_map_symbol *ams);
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
- FILE *fp);
-
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
-
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
+bool __map__is_bpf_prog(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
- return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map);
+ return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
+ !__map__is_bpf_prog(map);
}
bool map__has_symbols(const struct map *map);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
new file mode 100644
index 000000000000..4dcda33e0fdf
--- /dev/null
+++ b/tools/perf/util/map_groups.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAP_GROUPS_H
+#define __PERF_MAP_GROUPS_H
+
+#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "rwsem.h"
+
+struct ref_reloc_sym;
+struct machine;
+struct map;
+struct thread;
+
+struct maps {
+ struct rb_root entries;
+ struct rb_root names;
+ struct rw_semaphore lock;
+};
+
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
+
+struct map_groups {
+ struct maps maps;
+ struct machine *machine;
+ refcount_t refcnt;
+};
+
+#define KMAP_NAME_LEN 256
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct map_groups *kmaps;
+ char name[KMAP_NAME_LEN];
+};
+
+struct map_groups *map_groups__new(struct machine *machine);
+void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
+
+static inline struct map_groups *map_groups__get(struct map_groups *mg)
+{
+ if (mg)
+ refcount_inc(&mg->refcnt);
+ return mg;
+}
+
+void map_groups__put(struct map_groups *mg);
+void map_groups__init(struct map_groups *mg, struct machine *machine);
+void map_groups__exit(struct map_groups *mg);
+int map_groups__clone(struct thread *thread, struct map_groups *parent);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
+
+void map_groups__insert(struct map_groups *mg, struct map *map);
+
+static inline void map_groups__remove(struct map_groups *mg, struct map *map)
+{
+ maps__remove(&mg->maps, map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
+{
+ return maps__find(&mg->maps, addr);
+}
+
+struct map *map_groups__first(struct map_groups *mg);
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return map__next(map);
+}
+
+struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
+struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
+
+struct addr_map_symbol;
+
+int map_groups__find_ams(struct addr_map_symbol *ams);
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
+
+struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
+
+#endif // __PERF_MAP_GROUPS_H
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
new file mode 100644
index 000000000000..5a1aed9f6bb4
--- /dev/null
+++ b/tools/perf/util/map_symbol.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_MAP_SYMBOL
+#define __PERF_MAP_SYMBOL 1
+
+#include <linux/types.h>
+
+struct map;
+struct symbol;
+
+struct map_symbol {
+ struct map *map;
+ struct symbol *sym;
+};
+
+struct addr_map_symbol {
+ struct map *map;
+ struct symbol *sym;
+ u64 addr;
+ u64 al_addr;
+ u64 phys_addr;
+};
+#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index a28f9b5cc4ff..699e020737d9 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
/* Manage metrics and groups of metrics from JSON files */
@@ -270,7 +261,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
}
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
- bool raw)
+ bool raw, bool details)
{
struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
@@ -329,6 +320,12 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
if (asprintf(&s, "%s\n%*s%s]",
pe->metric_name, 8, "[", pe->desc) < 0)
return;
+
+ if (details) {
+ if (asprintf(&s, "%s\n%*s%s]",
+ s, 8, "[", pe->metric_expr) < 0)
+ return;
+ }
}
if (!s)
@@ -352,7 +349,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
else if (metrics && !raw)
printf("\nMetrics:\n\n");
- for (node = rb_first(&groups.entries); node; node = next) {
+ for (node = rb_first_cached(&groups.entries); node; node = next) {
struct mep *me = container_of(node, struct mep, nd);
if (metricgroups)
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 8a155dba0581..5c52097a5c63 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -27,6 +27,7 @@ int metricgroup__parse_groups(const struct option *opt,
const char *str,
struct rblist *metric_events);
-void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+void metricgroup__print(bool metrics, bool groups, char *filter,
+ bool raw, bool details);
bool metricgroup__has_metric(const char *metric);
#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..768c632b0d82 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -1,15 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
* copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,9 +156,76 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+static int perf_mmap__aio_enabled(struct perf_mmap *map)
+{
+ return map->aio.nr_cblocks > 0;
+}
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->aio.data[idx] == MAP_FAILED) {
+ map->aio.data[idx] = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ if (map->aio.data[idx]) {
+ munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
+ map->aio.data[idx] = NULL;
+ }
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+{
+ void *data;
+ size_t mmap_len;
+ unsigned long node_mask;
+
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ data = map->aio.data[idx];
+ mmap_len = perf_mmap__mmap_len(map);
+ node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
+ data, data + mmap_len, cpu__get_node(cpu));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#else /* !HAVE_LIBNUMA_SUPPORT */
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+{
+ map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
+ if (map->aio.data[idx] == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+{
+ zfree(&(map->aio.data[idx]));
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
- int delta_max, i, prio;
+ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
@@ -177,11 +246,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
- if (!map->aio.data[i]) {
+ ret = perf_mmap__aio_alloc(map, i);
+ if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ if (ret == -1)
+ return -1;
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,87 +282,18 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
zfree(&map->aio.aiocb);
}
-
-int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
- int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
- off_t *off)
+#else /* !HAVE_AIO_SUPPORT */
+static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
{
- u64 head = perf_mmap__read_head(md);
- unsigned char *data = md->base + page_size;
- unsigned long size, size0 = 0;
- void *buf;
- int rc = 0;
-
- rc = perf_mmap__read_init(md);
- if (rc < 0)
- return (rc == -EAGAIN) ? 0 : -1;
-
- /*
- * md->base data is copied into md->data[idx] buffer to
- * release space in the kernel buffer as fast as possible,
- * thru perf_mmap__consume() below.
- *
- * That lets the kernel to proceed with storing more
- * profiling data into the kernel buffer earlier than other
- * per-cpu kernel buffers are handled.
- *
- * Coping can be done in two steps in case the chunk of
- * profiling data crosses the upper bound of the kernel buffer.
- * In this case we first move part of data from md->start
- * till the upper bound and then the reminder from the
- * beginning of the kernel buffer till the end of
- * the data chunk.
- */
-
- size = md->end - md->start;
-
- if ((md->start & md->mask) + size != (md->end & md->mask)) {
- buf = &data[md->start & md->mask];
- size = md->mask + 1 - (md->start & md->mask);
- md->start += size;
- memcpy(md->aio.data[idx], buf, size);
- size0 = size;
- }
-
- buf = &data[md->start & md->mask];
- size = md->end - md->start;
- md->start += size;
- memcpy(md->aio.data[idx] + size0, buf, size);
-
- /*
- * Increment md->refcount to guard md->data[idx] buffer
- * from premature deallocation because md object can be
- * released earlier than aio write request started
- * on mmap->data[idx] is complete.
- *
- * perf_mmap__put() is done at record__aio_complete()
- * after started request completion.
- */
- perf_mmap__get(md);
-
- md->prev = head;
- perf_mmap__consume(md);
-
- rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
- if (!rc) {
- *off += size0 + size;
- } else {
- /*
- * Decrement md->refcount back if aio write
- * operation failed to start.
- */
- perf_mmap__put(md);
- }
-
- return rc;
+ return 0;
}
-#else
+
static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
{
@@ -305,6 +308,10 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
void perf_mmap__munmap(struct perf_mmap *map)
{
perf_mmap__aio_munmap(map);
+ if (map->data != NULL) {
+ munmap(map->data, perf_mmap__mmap_len(map));
+ map->data = NULL;
+ }
if (map->base != NULL) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
@@ -314,6 +321,32 @@ void perf_mmap__munmap(struct perf_mmap *map)
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
+static void build_node_mask(int node, cpu_set_t *mask)
+{
+ int c, cpu, nr_cpus;
+ const struct cpu_map *cpu_map = NULL;
+
+ cpu_map = cpu_map__online();
+ if (!cpu_map)
+ return;
+
+ nr_cpus = cpu_map__nr(cpu_map);
+ for (c = 0; c < nr_cpus; c++) {
+ cpu = cpu_map->map[c]; /* map c index to online cpu index */
+ if (cpu__get_node(cpu) == node)
+ CPU_SET(cpu, mask);
+ }
+}
+
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+ CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
+ build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ else if (mp->affinity == PERF_AFFINITY_CPU)
+ CPU_SET(map->cpu, &map->affinity_mask);
+}
+
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
@@ -343,6 +376,23 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ perf_mmap__setup_affinity_mask(map, mp);
+
+ map->flush = mp->flush;
+
+ map->comp_level = mp->comp_level;
+
+ if (map->comp_level && !perf_mmap__aio_enabled(map)) {
+ map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->data == MAP_FAILED) {
+ pr_debug2("failed to mmap data buffer, error %d\n",
+ errno);
+ map->data = NULL;
+ return -1;
+ }
+ }
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
@@ -395,7 +445,7 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
md->start = md->overwrite ? head : old;
md->end = md->overwrite ? old : head;
- if (md->start == md->end)
+ if ((md->end - md->start) < md->flush)
return -EAGAIN;
size = md->end - md->start;
@@ -441,7 +491,7 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
rc = perf_mmap__read_init(md);
if (rc < 0)
- return (rc == -EAGAIN) ? 0 : -1;
+ return (rc == -EAGAIN) ? 1 : -1;
size = md->end - md->start;
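The NUMA placement done by perf_mmap__aio_bind() above amounts to allocating an anonymous mapping and then restricting its pages to the node of the mmap's CPU with mbind(). A minimal standalone sketch of that pattern, assuming libnuma's <numaif.h> is installed and with the node number hard-coded as a placeholder:

#include <stdio.h>
#include <sys/mman.h>
#include <numaif.h>                 /* mbind(), MPOL_BIND; link with -lnuma */

int main(void)
{
	size_t len = 4 * 4096;
	int node = 0;                           /* placeholder target node */
	unsigned long node_mask = 1UL << node;

	/* Anonymous buffer, standing in for an AIO copy target. */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Bind the not-yet-faulted pages to the chosen node. */
	if (mbind(buf, len, MPOL_BIND, &node_mask, sizeof(node_mask) * 8, 0))
		perror("mbind");

	munmap(buf, len);
	return 0;
}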
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..274ce389cd84 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,10 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
+ u64 flush;
+ void *data;
+ int comp_level;
};
/*
@@ -69,7 +73,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity, flush, comp_level;
struct auxtrace_mmap_params auxtrace_mp;
};
@@ -97,18 +101,6 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, void *to,
int push(struct perf_mmap *map, void *to, void *buf, size_t size));
-#ifdef HAVE_AIO_SUPPORT
-int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
- int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
- off_t *off);
-#else
-static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
- int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
- off_t *off __maybe_unused)
-{
- return 0;
-}
-#endif
size_t perf_mmap__mmap_len(struct perf_mmap *map);
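The affinity_mask member added to struct perf_mmap above is a plain cpu_set_t: perf_mmap__setup_affinity_mask() fills it with either the mmap's own CPU or every CPU of that CPU's NUMA node, and the consuming thread can later be pinned to it (that caller lives outside these hunks). A minimal sketch of building and applying such a mask with the glibc CPU_* macros; the CPU numbers are placeholders, not a real node layout:

#define _GNU_SOURCE
#include <sched.h>      /* cpu_set_t, CPU_ZERO, CPU_SET, sched_setaffinity */
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;
	int node_cpus[] = { 0, 2 };     /* placeholder: CPUs of the target node */

	CPU_ZERO(&mask);
	for (unsigned int i = 0; i < sizeof(node_cpus) / sizeof(node_cpus[0]); i++)
		CPU_SET(node_cpus[i], &mask);

	/* Pin the calling thread (pid 0) to the node's CPUs. */
	if (sched_setaffinity(0, sizeof(mask), &mask))
		perror("sched_setaffinity");

	return 0;
}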
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index aed170bd4384..023c4efd788d 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright (C) 2017 Hari Bathini, IBM Corporation
*/
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index d5f46c09ea31..15a5a276c478 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -1,7 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright (C) 2017 Hari Bathini, IBM Corporation
*/
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index ea523d3b248f..989fed6f43b5 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
"FINAL",
"ROUND",
"HALF ",
+ "TOP ",
+ "TIME ",
};
int err;
bool show_progress = false;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 920e1e6551dd..cf0b9b81c5aa 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -317,10 +317,12 @@ static struct perf_evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
char *name, struct perf_pmu *pmu,
- struct list_head *config_terms, bool auto_merge_stats)
+ struct list_head *config_terms, bool auto_merge_stats,
+ const char *cpu_list)
{
struct perf_evsel *evsel;
- struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
+ struct cpu_map *cpus = pmu ? pmu->cpus :
+ cpu_list ? cpu_map__new(cpu_list) : NULL;
event_attr_init(attr);
@@ -348,7 +350,25 @@ static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name,
struct list_head *config_terms)
{
- return __add_event(list, idx, attr, name, NULL, config_terms, false) ? 0 : -ENOMEM;
+ return __add_event(list, idx, attr, name, NULL, config_terms, false, NULL) ? 0 : -ENOMEM;
+}
+
+static int add_event_tool(struct list_head *list, int *idx,
+ enum perf_tool_event tool_event)
+{
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ };
+
+ evsel = __add_event(list, idx, &attr, NULL, NULL, NULL, false, "0");
+ if (!evsel)
+ return -ENOMEM;
+ evsel->tool_event = tool_event;
+ if (tool_event == PERF_TOOL_DURATION_TIME)
+ evsel->unit = strdup("ns");
+ return 0;
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -930,6 +950,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
[PARSE_EVENTS__TERM_TYPE_OVERWRITE] = "overwrite",
[PARSE_EVENTS__TERM_TYPE_NOOVERWRITE] = "no-overwrite",
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
+ [PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
};
static bool config_term_shrinked;
@@ -950,6 +971,7 @@ config_term_avail(int term_type, struct parse_events_error *err)
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
case PARSE_EVENTS__TERM_TYPE_NAME:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+ case PARSE_EVENTS__TERM_TYPE_PERCORE:
return true;
default:
if (!err)
@@ -1041,6 +1063,14 @@ do { \
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
CHECK_TYPE_VAL(NUM);
break;
+ case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ CHECK_TYPE_VAL(NUM);
+ if ((unsigned int)term->val.num > 1) {
+ err->str = strdup("expected 0 or 1");
+ err->idx = term->err_val;
+ return -EINVAL;
+ }
+ break;
default:
err->str = strdup("unknown term");
err->idx = term->err_term;
@@ -1179,6 +1209,10 @@ do { \
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str);
break;
+ case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ ADD_CONFIG_TERM(PERCORE, percore,
+ term->val.num ? true : false);
+ break;
default:
break;
}
@@ -1233,6 +1267,25 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
get_config_name(head_config), &config_terms);
}
+int parse_events_add_tool(struct parse_events_state *parse_state,
+ struct list_head *list,
+ enum perf_tool_event tool_event)
+{
+ return add_event_tool(list, &parse_state->idx, tool_event);
+}
+
+static bool config_term_percore(struct list_head *config_terms)
+{
+ struct perf_evsel_config_term *term;
+
+ list_for_each_entry(term, config_terms, list) {
+ if (term->type == PERF_EVSEL__CONFIG_TERM_PERCORE)
+ return term->val.percore;
+ }
+
+ return false;
+}
+
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config,
@@ -1267,7 +1320,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
+ auto_merge_stats, NULL);
if (evsel) {
evsel->pmu_name = name;
evsel->use_uncore_alias = use_uncore_alias;
@@ -1295,7 +1349,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), pmu,
- &config_terms, auto_merge_stats);
+ &config_terms, auto_merge_stats, NULL);
if (evsel) {
evsel->unit = info.unit;
evsel->scale = info.scale;
@@ -1305,6 +1359,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->metric_name = info.metric_name;
evsel->pmu_name = name;
evsel->use_uncore_alias = use_uncore_alias;
+ evsel->percore = config_term_percore(&evsel->config_terms);
}
return evsel ? 0 : -ENOMEM;
@@ -2271,6 +2326,7 @@ static bool is_event_supported(u8 type, unsigned config)
perf_evsel__delete(evsel);
}
+ thread_map__put(tmap);
return ret;
}
@@ -2341,6 +2397,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
+ free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {
@@ -2427,6 +2484,25 @@ out_enomem:
return evt_num;
}
+static void print_tool_event(const char *name, const char *event_glob,
+ bool name_only)
+{
+ if (event_glob && !strglobmatch(name, event_glob))
+ return;
+ if (name_only)
+ printf("%s ", name);
+ else
+ printf(" %-50s [%s]\n", name, "Tool event");
+
+}
+
+void print_tool_events(const char *event_glob, bool name_only)
+{
+ print_tool_event("duration_time", event_glob, name_only);
+ if (pager_in_use())
+ printf("\n");
+}
+
void print_symbol_events(const char *event_glob, unsigned type,
struct event_symbol *syms, unsigned max,
bool name_only)
@@ -2510,6 +2586,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
event_symbols_sw, PERF_COUNT_SW_MAX, name_only);
+ print_tool_events(event_glob, name_only);
print_hwcache_events(event_glob, name_only);
@@ -2540,7 +2617,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
- metricgroup__print(true, true, NULL, name_only);
+ metricgroup__print(true, true, NULL, name_only, details_flag);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 5ed035cbcbb7..f7139e1a2fd3 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -75,6 +75,7 @@ enum {
PARSE_EVENTS__TERM_TYPE_NOOVERWRITE,
PARSE_EVENTS__TERM_TYPE_OVERWRITE,
PARSE_EVENTS__TERM_TYPE_DRV_CFG,
+ PARSE_EVENTS__TERM_TYPE_PERCORE,
__PARSE_EVENTS__TERM_TYPE_NR,
};
@@ -160,6 +161,10 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
struct list_head *head_config);
+enum perf_tool_event;
+int parse_events_add_tool(struct parse_events_state *parse_state,
+ struct list_head *list,
+ enum perf_tool_event tool_event);
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *error,
@@ -200,6 +205,7 @@ extern struct event_symbol event_symbols_sw[];
void print_symbol_events(const char *event_glob, unsigned type,
struct event_symbol *syms, unsigned max,
bool name_only);
+void print_tool_events(const char *event_glob, bool name_only);
void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
bool name_only);
int print_hwcache_events(const char *event_glob, bool name_only);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 7805c71aaae2..ca6098874fe2 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -15,6 +15,7 @@
#include "../perf.h"
#include "parse-events.h"
#include "parse-events-bison.h"
+#include "evsel.h"
char *parse_events_get_text(yyscan_t yyscanner);
YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
@@ -154,6 +155,14 @@ static int sym(yyscan_t scanner, int type, int config)
return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
}
+static int tool(yyscan_t scanner, enum perf_tool_event event)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = event;
+ return PE_VALUE_SYM_TOOL;
+}
+
static int term(yyscan_t scanner, int type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -274,6 +283,7 @@ inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
no-inherit { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
no-overwrite { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
+percore { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_PERCORE); }
, { return ','; }
"/" { BEGIN(INITIAL); return '/'; }
{name_minus} { return str(yyscanner, PE_NAME); }
@@ -322,7 +332,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
-duration_time { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
/*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index da8fe57691b8..6ad8d4914969 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "util.h"
#include "pmu.h"
+#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include "parse-events-bison.h"
@@ -45,6 +46,7 @@ static void inc_group_count(struct list_head *list,
%token PE_START_EVENTS PE_START_TERMS
%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
+%token PE_VALUE_SYM_TOOL
%token PE_EVENT_NAME
%token PE_NAME
%token PE_BPF_OBJECT PE_BPF_SOURCE
@@ -58,6 +60,7 @@ static void inc_group_count(struct list_head *list,
%type <num> PE_VALUE
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
+%type <num> PE_VALUE_SYM_TOOL
%type <num> PE_RAW
%type <num> PE_TERM
%type <str> PE_NAME
@@ -311,7 +314,7 @@ value_sym '/' event_config '/'
$$ = list;
}
|
-value_sym sep_slash_dc
+value_sym sep_slash_slash_dc
{
struct list_head *list;
int type = $1 >> 16;
@@ -321,6 +324,15 @@ value_sym sep_slash_dc
ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL));
$$ = list;
}
+|
+PE_VALUE_SYM_TOOL sep_slash_slash_dc
+{
+ struct list_head *list;
+
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tool(_parse_state, list, $1));
+ $$ = list;
+}
event_legacy_cache:
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_event_config
@@ -702,7 +714,7 @@ PE_VALUE PE_ARRAY_RANGE PE_VALUE
sep_dc: ':' |
-sep_slash_dc: '/' | ':' |
+sep_slash_slash_dc: '/' '/' | ':' |
%%
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
index e6599e290f46..08581e276225 100644
--- a/tools/perf/util/parse-regs-options.c
+++ b/tools/perf/util/parse-regs-options.c
@@ -5,13 +5,14 @@
#include <subcmd/parse-options.h>
#include "util/parse-regs-options.h"
-int
-parse_regs(const struct option *opt, const char *str, int unset)
+static int
+__parse_regs(const struct option *opt, const char *str, int unset, bool intr)
{
uint64_t *mode = (uint64_t *)opt->value;
const struct sample_reg *r;
char *s, *os = NULL, *p;
int ret = -1;
+ uint64_t mask;
if (unset)
return 0;
@@ -22,6 +23,11 @@ parse_regs(const struct option *opt, const char *str, int unset)
if (*mode)
return -1;
+ if (intr)
+ mask = arch__intr_reg_mask();
+ else
+ mask = arch__user_reg_mask();
+
/* str may be NULL in case no arg is passed to -I */
if (str) {
/* because str is read-only */
@@ -37,19 +43,20 @@ parse_regs(const struct option *opt, const char *str, int unset)
if (!strcmp(s, "?")) {
fprintf(stderr, "available registers: ");
for (r = sample_reg_masks; r->name; r++) {
- fprintf(stderr, "%s ", r->name);
+ if (r->mask & mask)
+ fprintf(stderr, "%s ", r->name);
}
fputc('\n', stderr);
/* just printing available regs */
return -1;
}
for (r = sample_reg_masks; r->name; r++) {
- if (!strcasecmp(s, r->name))
+ if ((r->mask & mask) && !strcasecmp(s, r->name))
break;
}
if (!r->name) {
- ui__warning("unknown register %s,"
- " check man page\n", s);
+ ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n",
+ s, intr ? "-I" : "--user-regs=");
goto error;
}
@@ -65,8 +72,20 @@ parse_regs(const struct option *opt, const char *str, int unset)
/* default to all possible regs */
if (*mode == 0)
- *mode = PERF_REGS_MASK;
+ *mode = mask;
error:
free(os);
return ret;
}
+
+int
+parse_user_regs(const struct option *opt, const char *str, int unset)
+{
+ return __parse_regs(opt, str, unset, false);
+}
+
+int
+parse_intr_regs(const struct option *opt, const char *str, int unset)
+{
+ return __parse_regs(opt, str, unset, true);
+}
diff --git a/tools/perf/util/parse-regs-options.h b/tools/perf/util/parse-regs-options.h
index cdefb1acf6be..2b23d25c6394 100644
--- a/tools/perf/util/parse-regs-options.h
+++ b/tools/perf/util/parse-regs-options.h
@@ -2,5 +2,6 @@
#ifndef _PERF_PARSE_REGS_OPTIONS_H
#define _PERF_PARSE_REGS_OPTIONS_H 1
struct option;
-int parse_regs(const struct option *opt, const char *str, int unset);
+int parse_user_regs(const struct option *opt, const char *str, int unset);
+int parse_intr_regs(const struct option *opt, const char *str, int unset);
#endif /* _PERF_PARSE_REGS_OPTIONS_H */
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 2acfcc527cac..2774cec1f15f 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -13,6 +13,16 @@ int __weak arch_sdt_arg_parse_op(char *old_op __maybe_unused,
return SDT_ARG_SKIP;
}
+uint64_t __weak arch__intr_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
+uint64_t __weak arch__user_reg_mask(void)
+{
+ return PERF_REGS_MASK;
+}
+
#ifdef HAVE_PERF_REGS_SUPPORT
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
{
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index c9319f8d17a6..cb9c246c8962 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -12,6 +12,7 @@ struct sample_reg {
uint64_t mask;
};
#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
+#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) }
#define SMPL_REG_END { .name = NULL }
extern const struct sample_reg sample_reg_masks[];
@@ -22,6 +23,8 @@ enum {
};
int arch_sdt_arg_parse_op(char *old_op, char **new_op);
+uint64_t arch__intr_reg_mask(void);
+uint64_t arch__user_reg_mask(void);
#ifdef HAVE_PERF_REGS_SUPPORT
#include <perf_regs.h>
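Tying the register-mask pieces together: every sample_reg_masks entry carries a one-bit (SMPL_REG) or two-bit (SMPL_REG2) mask, and the rewritten -I/--user-regs parser only accepts names whose mask intersects arch__intr_reg_mask() or arch__user_reg_mask(). A standalone sketch with a made-up table; the register names and bit positions are illustrative, not any real architecture's layout:

#include <stdio.h>
#include <stdint.h>

struct sample_reg {
	const char *name;
	uint64_t mask;
};

#define SMPL_REG(n, b)  { .name = #n, .mask = 1ULL << (b) }
#define SMPL_REG2(n, b) { .name = #n, .mask = 3ULL << (b) }  /* register pair */
#define SMPL_REG_END    { .name = NULL }

static const struct sample_reg regs[] = {
	SMPL_REG(ax, 0),
	SMPL_REG(bx, 1),
	SMPL_REG2(xmm0, 32),            /* occupies two mask bits */
	SMPL_REG_END,
};

int main(void)
{
	/* Pretend the architecture exposes only ax and bx for interrupt regs. */
	uint64_t intr_mask = (1ULL << 0) | (1ULL << 1);

	printf("available registers:");
	for (const struct sample_reg *r = regs; r->name; r++)
		if (r->mask & intr_mask)
			printf(" %s", r->name);
	putchar('\n');
	return 0;
}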
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 11a234740632..faa8eb231e1b 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -29,8 +29,6 @@ struct perf_pmu_format {
struct list_head list;
};
-#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
-
int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
@@ -711,9 +709,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
int i;
struct pmu_events_map *map;
- struct pmu_event *pe;
const char *name = pmu->name;
- const char *pname;
map = perf_pmu__find_map(pmu);
if (!map)
@@ -724,20 +720,28 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
*/
i = 0;
while (1) {
+ const char *cpu_name = is_arm_pmu_core(name) ? name : "cpu";
+ struct pmu_event *pe = &map->table[i++];
+ const char *pname = pe->pmu ? pe->pmu : cpu_name;
- pe = &map->table[i++];
if (!pe->name) {
if (pe->metric_group || pe->metric_name)
continue;
break;
}
- if (!is_arm_pmu_core(name)) {
- pname = pe->pmu ? pe->pmu : "cpu";
- if (strcmp(pname, name))
- continue;
- }
+ /*
+ * uncore alias may be from different PMU
+ * with common prefix
+ */
+ if (pmu_is_uncore(name) &&
+ !strncmp(pname, name, strlen(pname)))
+ goto new_alias;
+
+ if (strcmp(pname, name))
+ continue;
+new_alias:
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
(char *)pe->desc, (char *)pe->event,
@@ -754,6 +758,19 @@ perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
return NULL;
}
+static int pmu_max_precise(const char *name)
+{
+ char path[PATH_MAX];
+ int max_precise = -1;
+
+ scnprintf(path, PATH_MAX,
+ "bus/event_source/devices/%s/caps/max_precise",
+ name);
+
+ sysfs__read_int(path, &max_precise);
+ return max_precise;
+}
+
static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
@@ -786,6 +803,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->name = strdup(name);
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
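pmu_max_precise() above fills the new max_precise field through sysfs__read_int(); outside perf's helpers the same lookup is simply reading one integer from a caps file under the event_source bus. A minimal sketch, using the "cpu" PMU as an assumed example and keeping -1 as the "attribute absent" value, matching the hunk above:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/event_source/devices/cpu/caps/max_precise";
	int max_precise = -1;           /* default when the attribute is absent */
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%d", &max_precise) != 1)
			max_precise = -1;
		fclose(f);
	}
	printf("max_precise = %d\n", max_precise);
	return 0;
}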
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 76fecec7b3f9..bd9ec2704a57 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -6,9 +6,10 @@
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <stdbool.h>
-#include "evsel.h"
#include "parse-events.h"
+struct perf_evsel_config_term;
+
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
PERF_PMU_FORMAT_VALUE_CONFIG1,
@@ -16,6 +17,7 @@ enum {
};
#define PERF_PMU_FORMAT_BITS 64
+#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
struct perf_event_attr;
@@ -24,12 +26,12 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ int max_precise;
struct perf_event_attr *default_config;
struct cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head list; /* ELEM */
- int (*set_drv_config) (struct perf_evsel_config_term *term);
};
struct perf_pmu_info {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 18a59fba97ff..2ebf8673f8e9 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-event.c : perf-probe definition to probe_events format converter
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <inttypes.h>
@@ -35,11 +21,14 @@
#include "util.h"
#include "event.h"
+#include "namespaces.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "cache.h"
#include "color.h"
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "thread.h"
#include <api/fs/fs.h>
@@ -157,8 +146,10 @@ static struct map *kernel_get_module_map(const char *module)
if (module && strchr(module, '/'))
return dso__new_map(module);
- if (!module)
- module = "kernel";
+ if (!module) {
+ pos = machine__kernel_map(host_machine);
+ return map__get(pos);
+ }
for (pos = maps__first(maps); pos; pos = map__next(pos)) {
/* short_name is "[module]" */
@@ -469,9 +460,12 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
strcpy(reason, "(unknown)");
} else
dso__strerror_load(dso, reason, STRERR_BUFSIZE);
- if (!silent)
- pr_err("Failed to find the path for %s: %s\n",
- module ?: "kernel", reason);
+ if (!silent) {
+ if (module)
+ pr_err("Module %s is not loaded, please specify its full path name.\n", module);
+ else
+ pr_err("Failed to find the path for the kernel: %s\n", reason);
+ }
return NULL;
}
path = dso->long_name;
@@ -3528,7 +3522,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
- for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&map->dso->symbol_names); nd;
+ nd = rb_next(nd)) {
struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
if (strfilter__compare(_filter, pos->sym.name))
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 15a98c3a2a2f..05c8d571a901 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -4,8 +4,9 @@
#include <linux/compiler.h>
#include <stdbool.h>
-#include "intlist.h"
-#include "namespaces.h"
+
+struct intlist;
+struct nsinfo;
/* Probe related configurations */
struct probe_conf {
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 0b1195cad0e5..0ed1900454eb 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-file.c : operate ftrace k/uprobe events files
*
* Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <errno.h>
#include <fcntl.h>
@@ -20,6 +10,7 @@
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
+#include "namespaces.h"
#include "util.h"
#include "event.h"
#include "strlist.h"
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c37fbef1711d..6b40cc691a2d 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* probe-finder.c : C expression to kprobe event converter
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
*/
#include <inttypes.h>
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index dda0ac978b1e..6aa7e2352e16 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -342,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent)
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
- struct tep_handle *pevent = field->event->pevent;
+ struct tep_handle *pevent = field->event->tep;
void *data = pe->sample.raw_data;
PyObject *ret = NULL;
unsigned long long val;
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index a920f702a74d..376e86cb4c3c 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -140,12 +140,12 @@ struct __name##_sorted *__name = __name##_sorted__new
/* For 'struct intlist' */
#define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \
- DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \
+ DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
-#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
- DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
- __machine->threads[hash_bucket].nr)
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
+ __machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0efc3258c648..f399b7ec4d8d 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Based on strlist.c by:
* (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Licensed under the GPLv2.
*/
#include <errno.h>
@@ -13,8 +12,9 @@
int rblist__add_node(struct rblist *rblist, const void *new_entry)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -24,8 +24,10 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
rc = rblist->node_cmp(parent, new_entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return -EEXIST;
}
@@ -35,7 +37,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
return -ENOMEM;
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node, &rblist->entries, leftmost);
++rblist->nr_entries;
return 0;
@@ -43,7 +45,7 @@ int rblist__add_node(struct rblist *rblist, const void *new_entry)
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
{
- rb_erase(rb_node, &rblist->entries);
+ rb_erase_cached(rb_node, &rblist->entries);
--rblist->nr_entries;
rblist->node_delete(rblist, rb_node);
}
@@ -52,8 +54,9 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
const void *entry,
bool create)
{
- struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node **p = &rblist->entries.rb_root.rb_node;
struct rb_node *parent = NULL, *new_node = NULL;
+ bool leftmost = true;
while (*p != NULL) {
int rc;
@@ -63,8 +66,10 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
rc = rblist->node_cmp(parent, entry);
if (rc > 0)
p = &(*p)->rb_left;
- else if (rc < 0)
+ else if (rc < 0) {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
else
return parent;
}
@@ -73,7 +78,8 @@ static struct rb_node *__rblist__findnew(struct rblist *rblist,
new_node = rblist->node_new(rblist, entry);
if (new_node) {
rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &rblist->entries);
+ rb_insert_color_cached(new_node,
+ &rblist->entries, leftmost);
++rblist->nr_entries;
}
}
@@ -94,7 +100,7 @@ struct rb_node *rblist__findnew(struct rblist *rblist, const void *entry)
void rblist__init(struct rblist *rblist)
{
if (rblist != NULL) {
- rblist->entries = RB_ROOT;
+ rblist->entries = RB_ROOT_CACHED;
rblist->nr_entries = 0;
}
@@ -103,7 +109,7 @@ void rblist__init(struct rblist *rblist)
void rblist__exit(struct rblist *rblist)
{
- struct rb_node *pos, *next = rb_first(&rblist->entries);
+ struct rb_node *pos, *next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
@@ -124,7 +130,8 @@ struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
{
struct rb_node *node;
- for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ for (node = rb_first_cached(&rblist->entries); node;
+ node = rb_next(node)) {
if (!idx--)
return node;
}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 76df15c27f5f..14b232a4d0b6 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -20,7 +20,7 @@
*/
struct rblist {
- struct rb_root entries;
+ struct rb_root_cached entries;
unsigned int nr_entries;
int (*node_cmp)(struct rb_node *rbn, const void *entry);
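The rblist conversion above is the same rb_root_cached pattern already applied to map.c, rb_resort.h and metricgroup.c earlier in this diff: the cached root keeps a pointer to the leftmost node, so each insertion must report whether the new node stayed leftmost (any step to the right clears the flag) and rb_insert_color_cached() can then refresh the cached pointer in O(1). A condensed sketch of that insertion shape, assuming the tools/include/linux/rbtree.h API and a caller-supplied cmp() ordering:

#include <stdbool.h>
#include <linux/rbtree.h>   /* from tools/include */

/* Sketch only: generic leftmost-aware insert, mirroring rblist__add_node(). */
static int cached_insert(struct rb_root_cached *root, struct rb_node *new,
			 int (*cmp)(struct rb_node *parent, struct rb_node *new))
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		int rc;

		parent = *p;
		rc = cmp(parent, new);
		if (rc > 0) {
			p = &(*p)->rb_left;
		} else if (rc < 0) {
			p = &(*p)->rb_right;
			leftmost = false;       /* went right at least once */
		} else {
			return -1;              /* duplicate entry */
		}
	}

	rb_link_node(new, parent, p);
	rb_insert_color_cached(new, root, leftmost);
	return 0;
}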
diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h
new file mode 100644
index 000000000000..d4356030b504
--- /dev/null
+++ b/tools/perf/util/s390-cpumcf-kernel.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for s390 CPU measurement counter set diagnostic facility
+ *
+ * Copyright IBM Corp. 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ * Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef S390_CPUMCF_KERNEL_H
+#define S390_CPUMCF_KERNEL_H
+
+#define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */
+#define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */
+
+struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
+ unsigned int def:16; /* 0-15 Data Entry Format */
+ unsigned int set:16; /* 16-31 Counter set identifier */
+ unsigned int ctr:16; /* 32-47 Number of stored counters */
+ unsigned int res1:16; /* 48-63 Reserved */
+};
+
+struct cf_trailer_entry { /* CPU-M CF trailer for raw traces (64 byte) */
+ /* 0 - 7 */
+ union {
+ struct {
+ unsigned int clock_base:1; /* TOD clock base */
+ unsigned int speed:1; /* CPU speed */
+ /* Measurement alerts */
+ unsigned int mtda:1; /* Loss of MT ctr. data alert */
+ unsigned int caca:1; /* Counter auth. change alert */
+ unsigned int lcda:1; /* Loss of counter data alert */
+ };
+ unsigned long flags; /* 0-63 All indicators */
+ };
+ /* 8 - 15 */
+ unsigned int cfvn:16; /* 64-79 Ctr First Version */
+ unsigned int csvn:16; /* 80-95 Ctr Second Version */
+ unsigned int cpu_speed:32; /* 96-127 CPU speed */
+ /* 16 - 23 */
+ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
+ /* 24 - 55 */
+ union {
+ struct {
+ unsigned long progusage1;
+ unsigned long progusage2;
+ unsigned long progusage3;
+ unsigned long tod_base;
+ };
+ unsigned long progusage[4];
+ };
+ /* 56 - 63 */
+ unsigned int mach_type:16; /* Machine type */
+ unsigned int res1:16; /* Reserved */
+ unsigned int res2:32; /* Reserved */
+};
+
+#define CPUMF_CTR_SET_BASIC 0 /* Basic Counter Set */
+#define CPUMF_CTR_SET_USER 1 /* Problem-State Counter Set */
+#define CPUMF_CTR_SET_CRYPTO 2 /* Crypto-Activity Counter Set */
+#define CPUMF_CTR_SET_EXT 3 /* Extended Counter Set */
+#define CPUMF_CTR_SET_MT_DIAG 4 /* MT-diagnostic Counter Set */
+#endif
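The buffers carrying these structures come from s390 and are therefore big-endian, which is why the decoding code added later in this diff (s390-sample-raw.c) converts every field with be16_to_cpu()/be64_to_cpu() before use. Outside the kernel headers, <endian.h> provides the same conversion. A small sketch decoding one 8-byte counter-set entry header from raw bytes; the sample bytes are made up:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <endian.h>          /* be16toh() */

/* Mirrors the 8-byte counter-set entry header: four 16-bit fields. */
struct ctrset_hdr {
	uint16_t def;   /* entry format, 0xfeef for diagnostic entries */
	uint16_t set;   /* counter set identifier */
	uint16_t ctr;   /* number of 64-bit counters that follow */
	uint16_t res1;  /* reserved */
};

int main(void)
{
	/* Made-up big-endian raw bytes: def=0xfeef, set=1, ctr=2, res1=0. */
	unsigned char raw[8] = { 0xfe, 0xef, 0x00, 0x01, 0x00, 0x02, 0x00, 0x00 };
	struct ctrset_hdr hdr;

	memcpy(&hdr, raw, sizeof(hdr));
	printf("def=%#x set=%u ctr=%u\n",
	       (unsigned)be16toh(hdr.def), (unsigned)be16toh(hdr.set),
	       (unsigned)be16toh(hdr.ctr));
	return 0;
}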
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 68b2570304ec..c215704931dc 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -162,6 +162,7 @@
#include "auxtrace.h"
#include "s390-cpumsf.h"
#include "s390-cpumsf-kernel.h"
+#include "s390-cpumcf-kernel.h"
#include "config.h"
struct s390_cpumsf {
@@ -184,8 +185,58 @@ struct s390_cpumsf_queue {
struct auxtrace_buffer *buffer;
int cpu;
FILE *logfile;
+ FILE *logfile_ctr;
};
+/* Check if the raw data should be dumped to file. If this is the case and
+ * the file to dump to has not been opened for writing, do so.
+ *
+ * Return 0 on success and a value greater than zero on error so that processing continues.
+ */
+static int s390_cpumcf_dumpctr(struct s390_cpumsf *sf,
+ struct perf_sample *sample)
+{
+ struct s390_cpumsf_queue *sfq;
+ struct auxtrace_queue *q;
+ int rc = 0;
+
+ if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)
+ return rc;
+
+ q = &sf->queues.queue_array[sample->cpu];
+ sfq = q->priv;
+ if (!sfq) /* Queue not yet allocated */
+ return rc;
+
+ if (!sfq->logfile_ctr) {
+ char *name;
+
+ rc = (sf->logdir)
+ ? asprintf(&name, "%s/aux.ctr.%02x",
+ sf->logdir, sample->cpu)
+ : asprintf(&name, "aux.ctr.%02x", sample->cpu);
+ if (rc > 0)
+ sfq->logfile_ctr = fopen(name, "w");
+ if (sfq->logfile_ctr == NULL) {
+ pr_err("Failed to open counter set log file %s, "
+ "continue...\n", name);
+ rc = 1;
+ }
+ free(name);
+ }
+
+ if (sfq->logfile_ctr) {
+ /* See comment above for -4 */
+ size_t n = fwrite(sample->raw_data, sample->raw_size - 4, 1,
+ sfq->logfile_ctr);
+ if (n != 1) {
+ pr_err("Failed to write counter set data\n");
+ rc = 1;
+ }
+ }
+ return rc;
+}
+
/* Display s390 CPU measurement facility basic-sampling data entry */
static bool s390_cpumsf_basic_show(const char *color, size_t pos,
struct hws_basic_entry *basic)
@@ -301,6 +352,11 @@ static bool s390_cpumsf_validate(int machine_type,
*dsdes = 85;
*bsdes = 32;
break;
+ case 2964:
+ case 2965:
+ *dsdes = 112;
+ *bsdes = 32;
+ break;
default:
/* Illegal trailer entry */
return false;
@@ -768,7 +824,7 @@ static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
}
static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
- pid_t pid, pid_t tid, u64 ip)
+ pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
char msg[MAX_AUXTRACE_ERROR_MSG];
union perf_event event;
@@ -776,7 +832,7 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
- code, cpu, pid, tid, ip, msg);
+ code, cpu, pid, tid, ip, msg, timestamp);
err = perf_session__deliver_synth_event(sf->session, &event, NULL);
if (err)
@@ -788,11 +844,12 @@ static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
{
return s390_cpumsf_synth_error(sf, 1, sample->cpu,
- sample->pid, sample->tid, 0);
+ sample->pid, sample->tid, 0,
+ sample->time);
}
static int
-s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
+s390_cpumsf_process_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
struct perf_tool *tool)
@@ -801,6 +858,8 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
struct s390_cpumsf,
auxtrace);
u64 timestamp = sample->time;
+ struct perf_evsel *ev_bc000;
+
int err = 0;
if (dump_trace)
@@ -811,6 +870,16 @@ s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
return -EINVAL;
}
+ if (event->header.type == PERF_RECORD_SAMPLE &&
+ sample->raw_size) {
+ /* Handle event with raw data */
+ ev_bc000 = perf_evlist__event2evsel(session->evlist, event);
+ if (ev_bc000 &&
+ ev_bc000->attr.config == PERF_EVENT_CPUM_CF_DIAG)
+ err = s390_cpumcf_dumpctr(sf, sample);
+ return err;
+ }
+
if (event->header.type == PERF_RECORD_AUX &&
event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
return s390_cpumsf_lost(sf, sample);
@@ -891,9 +960,15 @@ static void s390_cpumsf_free_queues(struct perf_session *session)
struct s390_cpumsf_queue *sfq = (struct s390_cpumsf_queue *)
queues->queue_array[i].priv;
- if (sfq != NULL && sfq->logfile) {
- fclose(sfq->logfile);
- sfq->logfile = NULL;
+ if (sfq != NULL) {
+ if (sfq->logfile) {
+ fclose(sfq->logfile);
+ sfq->logfile = NULL;
+ }
+ if (sfq->logfile_ctr) {
+ fclose(sfq->logfile_ctr);
+ sfq->logfile_ctr = NULL;
+ }
}
zfree(&queues->queue_array[i].priv);
}
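The per-CPU counter-set dump files (aux.ctr.##) added above follow a simple lazy-open pattern: build the name with asprintf(), optionally under a configured log directory, open it the first time data for that CPU arrives, and cache the FILE pointer in the queue's private state. A standalone sketch of just the naming and opening step; logdir and the CPU number are placeholders:

#define _GNU_SOURCE          /* asprintf() */
#include <stdio.h>
#include <stdlib.h>

/* Sketch mirroring the naming scheme in s390_cpumcf_dumpctr() above. */
static FILE *open_ctr_dump(const char *logdir, int cpu)
{
	char *name;
	FILE *f = NULL;
	int rc = logdir
		? asprintf(&name, "%s/aux.ctr.%02x", logdir, cpu)
		: asprintf(&name, "aux.ctr.%02x", cpu);

	if (rc > 0) {
		f = fopen(name, "w");
		if (!f)
			fprintf(stderr, "cannot open %s\n", name);
		free(name);
	}
	return f;
}

int main(void)
{
	FILE *f = open_ctr_dump(NULL, 5);       /* placeholder CPU number */

	if (f)
		fclose(f);
	return 0;
}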
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
new file mode 100644
index 000000000000..6650f599ed9c
--- /dev/null
+++ b/tools/perf/util/s390-sample-raw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Architecture specific trace_event function. Save event's bc000 raw data
+ * to file. File name is aux.ctr.## where ## stands for the CPU number the
+ * sample was taken from.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <sys/stat.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+#include "debug.h"
+#include "util.h"
+#include "auxtrace.h"
+#include "session.h"
+#include "evlist.h"
+#include "config.h"
+#include "color.h"
+#include "sample-raw.h"
+#include "s390-cpumcf-kernel.h"
+#include "pmu-events/pmu-events.h"
+
+static size_t ctrset_size(struct cf_ctrset_entry *set)
+{
+ return sizeof(*set) + set->ctr * sizeof(u64);
+}
+
+static bool ctrset_valid(struct cf_ctrset_entry *set)
+{
+ return set->def == S390_CPUMCF_DIAG_DEF;
+}
+
+/* CPU Measurement Counter Facility raw data is a byte stream. It is 8 byte
+ * aligned and might have trailing padding bytes.
+ * Display the raw data on screen.
+ */
+static bool s390_cpumcfdg_testctr(struct perf_sample *sample)
+{
+ size_t len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ struct cf_trailer_entry *te;
+ struct cf_ctrset_entry *cep, ce;
+
+ if (!len)
+ return false;
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce) || offset + ctrset_size(&ce) > len) {
+ /* Raw data for counter sets is always a multiple of 8
+ * bytes. Prepending a 4-byte size field to the
+ * raw data block in the sample causes the perf tool
+ * to append 4 padding bytes to make the raw data part
+ * of the sample a multiple of eight bytes again.
+ *
+ * If the last entry (trailer) is 4 bytes off the raw
+ * area data end, all is good.
+ */
+ if (len - offset - sizeof(*te) == 4)
+ break;
+ pr_err("Invalid counter set entry at %zd\n", offset);
+ return false;
+ }
+ offset += ctrset_size(&ce);
+ }
+ return true;
+}
+
+/* Dump event bc000 on screen, already tested for correctness. */
+static void s390_cpumcfdg_dumptrail(const char *color, size_t offset,
+ struct cf_trailer_entry *tep)
+{
+ struct cf_trailer_entry te;
+
+ te.flags = be64_to_cpu(tep->flags);
+ te.cfvn = be16_to_cpu(tep->cfvn);
+ te.csvn = be16_to_cpu(tep->csvn);
+ te.cpu_speed = be32_to_cpu(tep->cpu_speed);
+ te.timestamp = be64_to_cpu(tep->timestamp);
+ te.progusage1 = be64_to_cpu(tep->progusage1);
+ te.progusage2 = be64_to_cpu(tep->progusage2);
+ te.progusage3 = be64_to_cpu(tep->progusage3);
+ te.tod_base = be64_to_cpu(tep->tod_base);
+ te.mach_type = be16_to_cpu(tep->mach_type);
+ te.res1 = be16_to_cpu(tep->res1);
+ te.res2 = be32_to_cpu(tep->res2);
+
+ color_fprintf(stdout, color, " [%#08zx] Trailer:%c%c%c%c%c"
+ " Cfvn:%d Csvn:%d Speed:%d TOD:%#llx\n",
+ offset, te.clock_base ? 'T' : ' ',
+ te.speed ? 'S' : ' ', te.mtda ? 'M' : ' ',
+ te.caca ? 'C' : ' ', te.lcda ? 'L' : ' ',
+ te.cfvn, te.csvn, te.cpu_speed, te.timestamp);
+ color_fprintf(stdout, color, "\t\t1:%lx 2:%lx 3:%lx TOD-Base:%#llx"
+ " Type:%x\n\n",
+ te.progusage1, te.progusage2, te.progusage3,
+ te.tod_base, te.mach_type);
+}
+
+/* Return starting number of a counter set */
+static int get_counterset_start(int setnr)
+{
+ switch (setnr) {
+ case CPUMF_CTR_SET_BASIC: /* Basic counter set */
+ return 0;
+ case CPUMF_CTR_SET_USER: /* Problem state counter set */
+ return 32;
+ case CPUMF_CTR_SET_CRYPTO: /* Crypto counter set */
+ return 64;
+ case CPUMF_CTR_SET_EXT: /* Extended counter set */
+ return 128;
+ case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */
+ return 448;
+ default:
+ return -1;
+ }
+}
+
+/* Scan the PMU events table and extract the logical name of a counter.
+ * Input is the counter set and the counter number within the set.
+ * Construct the event number and use it as the lookup key. If the key
+ * matches a table entry, return the name of that counter.
+ * If no match is found, return NULL.
+ */
+static const char *get_counter_name(int set, int nr, struct pmu_events_map *map)
+{
+ int rc, event_nr, wanted = get_counterset_start(set) + nr;
+
+ if (map) {
+ struct pmu_event *evp = map->table;
+
+ for (; evp->name || evp->event || evp->desc; ++evp) {
+ if (evp->name == NULL || evp->event == NULL)
+ continue;
+ rc = sscanf(evp->event, "event=%x", &event_nr);
+ if (rc == 1 && event_nr == wanted)
+ return evp->name;
+ }
+ }
+ return NULL;
+}
+
+static void s390_cpumcfdg_dump(struct perf_sample *sample)
+{
+ size_t i, len = sample->raw_size, offset = 0;
+ unsigned char *buf = sample->raw_data;
+ const char *color = PERF_COLOR_BLUE;
+ struct cf_ctrset_entry *cep, ce;
+ struct pmu_events_map *map;
+ struct perf_pmu pmu;
+ u64 *p;
+
+ memset(&pmu, 0, sizeof(pmu));
+ map = perf_pmu__find_map(&pmu);
+ while (offset < len) {
+ cep = (struct cf_ctrset_entry *)(buf + offset);
+
+ ce.def = be16_to_cpu(cep->def);
+ ce.set = be16_to_cpu(cep->set);
+ ce.ctr = be16_to_cpu(cep->ctr);
+ ce.res1 = be16_to_cpu(cep->res1);
+
+ if (!ctrset_valid(&ce)) { /* Print trailer */
+ s390_cpumcfdg_dumptrail(color, offset,
+ (struct cf_trailer_entry *)cep);
+ return;
+ }
+
+ color_fprintf(stdout, color, " [%#08zx] Counterset:%d"
+ " Counters:%d\n", offset, ce.set, ce.ctr);
+ for (i = 0, p = (u64 *)(cep + 1); i < ce.ctr; ++i, ++p) {
+ const char *ev_name = get_counter_name(ce.set, i, map);
+
+ color_fprintf(stdout, color,
+ "\tCounter:%03d %s Value:%#018lx\n", i,
+ ev_name ?: "<unknown>", be64_to_cpu(*p));
+ }
+ offset += ctrset_size(&ce);
+ }
+}
+
+/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events
+ * and, if the event was triggered by a counter set diagnostic event,
+ * display its raw data.
+ * The function is only invoked when the dump flag -D is set.
+ */
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct perf_evsel *ev_bc000;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ return;
+
+ ev_bc000 = perf_evlist__event2evsel(evlist, event);
+ if (ev_bc000 == NULL ||
+ ev_bc000->attr.config != PERF_EVENT_CPUM_CF_DIAG)
+ return;
+
+ /* Display raw data on screen */
+ if (!s390_cpumcfdg_testctr(sample)) {
+ pr_err("Invalid counter set data encountered\n");
+ return;
+ }
+ s390_cpumcfdg_dump(sample);
+}
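The layout walked by the parser above is defined in s390-cpumcf-kernel.h, which is not part of this diff. Below is a minimal standalone sketch of that layout, derived only from the be16_to_cpu()/be64_to_cpu() accesses and ctrset_size() above; the field widths mirror the code, while the 0xfeef format identifier is an assumption and may differ from the real S390_CPUMCF_DIAG_DEF value.

#include <endian.h>
#include <stddef.h>

/* Sketch only, not a copy of s390-cpumcf-kernel.h: each counter set starts
 * with an 8 byte header of four big-endian 16-bit fields, followed by
 * 'ctr' 64-bit counter values; a trailer entry ends the stream.
 */
struct ctrset_hdr {
	unsigned short def;	/* data entry format, diag format id when valid */
	unsigned short set;	/* counter set identifier */
	unsigned short ctr;	/* number of 64-bit counters that follow */
	unsigned short res1;	/* reserved */
};

static void walk_ctrsets(const unsigned char *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct ctrset_hdr) <= len) {
		const struct ctrset_hdr *h = (const void *)(buf + offset);

		if (be16toh(h->def) != 0xfeef)	/* assumed diag format id */
			break;			/* trailer or padding reached */
		/* same arithmetic as ctrset_size() above */
		offset += sizeof(*h) + be16toh(h->ctr) * sizeof(unsigned long long);
	}
}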
diff --git a/tools/perf/util/sample-raw.c b/tools/perf/util/sample-raw.c
new file mode 100644
index 000000000000..c21e1311fb0f
--- /dev/null
+++ b/tools/perf/util/sample-raw.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+#include "evlist.h"
+#include "env.h"
+#include "sample-raw.h"
+
+/*
+ * Check the platform the perf data file was created on and perform
+ * platform-specific interpretation.
+ */
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
+{
+ const char *arch_pf = perf_env__arch(evlist->env);
+
+ if (arch_pf && !strcmp("s390", arch_pf))
+ evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
+}
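perf_session__new() calls perf_evlist__init_trace_event_sample_raw() and dump_event() invokes the installed hook for PERF_RECORD_SAMPLE records when -D is given (see the session.c hunks below). A sketch of how a further architecture could be added to this dispatch; perf_evlist__x86_sample_raw is a hypothetical handler name, only the s390 branch exists in this patch.

/* Sketch only: extending the dispatch with a second architecture. */
void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist)
{
	const char *arch_pf = perf_env__arch(evlist->env);

	if (!arch_pf)
		return;
	if (!strcmp("s390", arch_pf))
		evlist->trace_event_sample_raw = perf_evlist__s390_sample_raw;
	else if (!strcmp("x86", arch_pf))
		evlist->trace_event_sample_raw = perf_evlist__x86_sample_raw; /* hypothetical */
}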
diff --git a/tools/perf/util/sample-raw.h b/tools/perf/util/sample-raw.h
new file mode 100644
index 000000000000..95d445c87e93
--- /dev/null
+++ b/tools/perf/util/sample-raw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAMPLE_RAW_H
+#define __SAMPLE_RAW_H 1
+
+struct perf_evlist;
+union perf_event;
+struct perf_sample;
+
+void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
+ union perf_event *event,
+ struct perf_sample *sample);
+
+void perf_evlist__init_trace_event_sample_raw(struct perf_evlist *evlist);
+#endif /* __SAMPLE_RAW_H */
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 82d28c67e0f3..7b342ce38d99 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,5 +1,5 @@
-libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
-libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+perf-$(CONFIG_LIBPERL) += trace-event-perl.o
+perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b93f36b887b5..61aa7f3df915 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -37,6 +37,8 @@
#include "../../perf.h"
#include "../callchain.h"
#include "../machine.h"
+#include "../map.h"
+#include "../symbol.h"
#include "../thread.h"
#include "../event.h"
#include "../trace-event.h"
@@ -370,7 +372,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
ns = nsecs - s * NSEC_PER_SEC;
scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->pevent;
+ scripting_context->pevent = evsel->tp_format->tep;
ENTER;
SAVETMPS;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 87ef16a1b17e..22f52b669871 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -44,6 +44,8 @@
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../call-path.h"
+#include "map.h"
+#include "symbol.h"
#include "thread_map.h"
#include "cpumap.h"
#include "print_binary.h"
@@ -733,8 +735,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
Py_FatalError("couldn't create Python dictionary");
pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
- pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
- (const char *)&evsel->attr, sizeof(evsel->attr)));
+ pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
_PyLong_FromLong(sample->pid));
@@ -836,7 +837,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
ns = nsecs - s * NSEC_PER_SEC;
scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->pevent;
+ scripting_context->pevent = evsel->tp_format->tep;
context = _PyCapsule_New(scripting_context, NULL, NULL);
@@ -1172,7 +1173,7 @@ static int python_export_call_return(struct db_export *dbe,
u64 comm_db_id = cr->comm ? cr->comm->db_id : 0;
PyObject *t;
- t = tuple_new(11);
+ t = tuple_new(12);
tuple_set_u64(t, 0, cr->db_id);
tuple_set_u64(t, 1, cr->thread->db_id);
@@ -1185,6 +1186,7 @@ static int python_export_call_return(struct db_export *dbe,
tuple_set_u64(t, 8, cr->return_ref);
tuple_set_u64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
+ tuple_set_u64(t, 11, cr->parent_db_id);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1193,11 +1195,12 @@ static int python_export_call_return(struct db_export *dbe,
return 0;
}
-static int python_process_call_return(struct call_return *cr, void *data)
+static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
+ void *data)
{
struct db_export *dbe = data;
- return db_export__call_return(dbe, cr);
+ return db_export__call_return(dbe, cr, parent_db_id);
}
static void python_process_general_event(struct perf_sample *sample,
@@ -1494,34 +1497,40 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv)
{
struct tables *tables = &tables_global;
+ PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3
const char **command_line;
#else
wchar_t **command_line;
#endif
- char buf[PATH_MAX];
+ /*
+ * Use a non-const name variable to cope with python 2.6's
+ * PyImport_AppendInittab prototype
+ */
+ char buf[PATH_MAX], name[19] = "perf_trace_context";
int i, err = 0;
FILE *fp;
#if PY_MAJOR_VERSION < 3
+ initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1];
#else
+ initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
#endif
+ PyImport_AppendInittab(name, initfunc);
Py_Initialize();
#if PY_MAJOR_VERSION < 3
- initperf_trace_context();
PySys_SetArgv(argc + 1, (char **)command_line);
#else
- PyInit_perf_trace_context();
PySys_SetArgv(argc + 1, command_line);
#endif
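The hunks above move registration of the builtin perf_trace_context module from an explicit init call after Py_Initialize() to PyImport_AppendInittab() before it; on Python 3, calling PyInit_* directly only creates a module object without registering it with the import machinery, so scripts could not import it. A minimal standalone sketch of the same pattern, assuming a builtin module named mymod with the usual initmymod/PyInit_mymod entry points.

#include <Python.h>

#if PY_MAJOR_VERSION < 3
extern void initmymod(void);		/* Python 2 style init */
#define MYMOD_INIT initmymod
#else
extern PyObject *PyInit_mymod(void);	/* Python 3 style init */
#define MYMOD_INIT PyInit_mymod
#endif

int main(void)
{
	char name[] = "mymod";	/* non-const, as in the hunk above */

	/* Must be called before Py_Initialize() for "import mymod" to work. */
	PyImport_AppendInittab(name, MYMOD_INIT);
	Py_Initialize();
	PyRun_SimpleString("import mymod");
	Py_Finalize();
	return 0;
}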
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5456c84c7dd1..54cf163347f7 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -13,6 +13,8 @@
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
+#include "map.h"
+#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
@@ -23,9 +25,65 @@
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
+#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"
+#ifdef HAVE_ZSTD_SUPPORT
+static int perf_session__process_compressed_event(struct perf_session *session,
+ union perf_event *event, u64 file_offset)
+{
+ void *src;
+ size_t decomp_size, src_size;
+ u64 decomp_last_rem = 0;
+ size_t decomp_len = session->header.env.comp_mmap_len;
+ struct decomp *decomp, *decomp_last = session->decomp_last;
+
+ decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (decomp == MAP_FAILED) {
+ pr_err("Couldn't allocate memory for decompression\n");
+ return -1;
+ }
+
+ decomp->file_pos = file_offset;
+ decomp->head = 0;
+
+ if (decomp_last) {
+ decomp_last_rem = decomp_last->size - decomp_last->head;
+ memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
+ decomp->size = decomp_last_rem;
+ }
+
+ src = (void *)event + sizeof(struct compressed_event);
+ src_size = event->pack.header.size - sizeof(struct compressed_event);
+
+ decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
+ &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
+ if (!decomp_size) {
+ munmap(decomp, sizeof(struct decomp) + decomp_len);
+ pr_err("Couldn't decompress data\n");
+ return -1;
+ }
+
+ decomp->size += decomp_size;
+
+ if (session->decomp == NULL) {
+ session->decomp = decomp;
+ session->decomp_last = decomp;
+ } else {
+ session->decomp_last->next = decomp;
+ session->decomp_last = decomp;
+ }
+
+ pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
+
+ return 0;
+}
+#else /* !HAVE_ZSTD_SUPPORT */
+#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
+#endif
+
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool,
@@ -129,6 +187,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
ordered_events__init(&session->ordered_events,
ordered_events__deliver_event, NULL);
+ perf_env__init(&session->header.env);
if (data) {
if (perf_data__open(data))
goto out_delete;
@@ -137,7 +196,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
if (perf_data__is_read(data)) {
if (perf_session__open(session) < 0)
- goto out_close;
+ goto out_delete;
/*
* set session attributes that are present in perf.data
@@ -147,6 +206,12 @@ struct perf_session *perf_session__new(struct perf_data *data,
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
+
+ perf_evlist__init_trace_event_sample_raw(session->evlist);
+
+ /* Open the directory data. */
+ if (data->is_dir && perf_data__open_dir(data))
+ goto out_delete;
}
} else {
session->machines.host.env = &perf_env;
@@ -176,8 +241,6 @@ struct perf_session *perf_session__new(struct perf_data *data,
return session;
- out_close:
- perf_data__close(data);
out_delete:
perf_session__delete(session);
out:
@@ -189,6 +252,21 @@ static void perf_session__delete_threads(struct perf_session *session)
machine__delete_threads(&session->machines.host);
}
+static void perf_session__release_decomp_events(struct perf_session *session)
+{
+ struct decomp *next, *decomp;
+ size_t decomp_len;
+ next = session->decomp;
+ decomp_len = session->header.env.comp_mmap_len;
+ do {
+ decomp = next;
+ if (decomp == NULL)
+ break;
+ next = decomp->next;
+ munmap(decomp, decomp_len + sizeof(struct decomp));
+ } while (1);
+}
+
void perf_session__delete(struct perf_session *session)
{
if (session == NULL)
@@ -197,6 +275,7 @@ void perf_session__delete(struct perf_session *session)
auxtrace_index__free(&session->auxtrace_index);
perf_session__destroy_kernel_maps(session);
perf_session__delete_threads(session);
+ perf_session__release_decomp_events(session);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
if (session->data)
@@ -350,6 +429,14 @@ static int process_stat_round_stub(struct perf_session *perf_session __maybe_unu
return 0;
}
+static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ u64 file_offset __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
@@ -376,6 +463,10 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->itrace_start = perf_event__process_itrace_start;
if (tool->context_switch == NULL)
tool->context_switch = perf_event__process_switch;
+ if (tool->ksymbol == NULL)
+ tool->ksymbol = perf_event__process_ksymbol;
+ if (tool->bpf_event == NULL)
+ tool->bpf_event = perf_event__process_bpf_event;
if (tool->read == NULL)
tool->read = process_event_sample_stub;
if (tool->throttle == NULL)
@@ -418,6 +509,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->time_conv = process_event_op2_stub;
if (tool->feature == NULL)
tool->feature = process_event_op2_stub;
+ if (tool->compressed == NULL)
+ tool->compressed = perf_session__process_compressed_event;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@@ -554,6 +647,26 @@ static void perf_event__throttle_swap(union perf_event *event,
swap_sample_id_all(event, &event->throttle + 1);
}
+static void perf_event__namespaces_swap(union perf_event *event,
+ bool sample_id_all)
+{
+ u64 i;
+
+ event->namespaces.pid = bswap_32(event->namespaces.pid);
+ event->namespaces.tid = bswap_32(event->namespaces.tid);
+ event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
+
+ for (i = 0; i < event->namespaces.nr_namespaces; i++) {
+ struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
+
+ ns->dev = bswap_64(ns->dev);
+ ns->ino = bswap_64(ns->ino);
+ }
+
+ if (sample_id_all)
+ swap_sample_id_all(event, &event->namespaces.link_info[i]);
+}
+
static u8 revbyte(u8 b)
{
int rev = (b >> 4) | ((b & 0xf) << 4);
@@ -694,7 +807,10 @@ static void perf_event__auxtrace_error_swap(union perf_event *event,
event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
+ event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
+ if (event->auxtrace_error.fmt)
+ event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}
static void perf_event__thread_map_swap(union perf_event *event,
@@ -791,6 +907,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
[PERF_RECORD_SWITCH] = perf_event__switch_swap,
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
+ [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
@@ -1065,6 +1182,8 @@ static void dump_event(struct perf_evlist *evlist, union perf_event *event,
file_offset, event->header.size, event->header.type);
trace_event(event);
+ if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
+ evlist->trace_event_sample_raw(evlist, event, sample);
if (sample)
perf_evlist__print_tstamp(evlist, event, sample);
@@ -1188,6 +1307,13 @@ static int deliver_sample_value(struct perf_evlist *evlist,
return 0;
}
+ /*
+ * There's no reason to deliver a sample
+ * with a zero period, bail out.
+ */
+ if (!sample->period)
+ return 0;
+
return tool->sample(tool, event, sample, sid->evsel, machine);
}
@@ -1305,6 +1431,10 @@ static int machines__deliver_event(struct machines *machines,
case PERF_RECORD_SWITCH:
case PERF_RECORD_SWITCH_CPU_WIDE:
return tool->context_switch(tool, event, sample, machine);
+ case PERF_RECORD_KSYMBOL:
+ return tool->ksymbol(tool, event, sample, machine);
+ case PERF_RECORD_BPF_EVENT:
+ return tool->bpf_event(tool, event, sample, machine);
default:
++evlist->stats.nr_unknown_events;
return -1;
@@ -1345,7 +1475,9 @@ static s64 perf_session__process_user_event(struct perf_session *session,
int fd = perf_data__fd(session->data);
int err;
- dump_event(session->evlist, event, file_offset, &sample);
+ if (event->header.type != PERF_RECORD_COMPRESSED ||
+ tool->compressed == perf_session__process_compressed_event_stub)
+ dump_event(session->evlist, event, file_offset, &sample);
/* These events are processed right away */
switch (event->header.type) {
@@ -1398,6 +1530,11 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->time_conv(session, event);
case PERF_RECORD_HEADER_FEATURE:
return tool->feature(session, event);
+ case PERF_RECORD_COMPRESSED:
+ err = tool->compressed(session, event, file_offset);
+ if (err)
+ dump_event(session->evlist, event, file_offset, &sample);
+ return err;
default:
return -EINVAL;
}
@@ -1680,6 +1817,8 @@ static int perf_session__flush_thread_stacks(struct perf_session *session)
volatile int session_done;
+static int __perf_session__process_decomp_events(struct perf_session *session);
+
static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
@@ -1760,6 +1899,10 @@ more:
if (skip > 0)
head += skip;
+ err = __perf_session__process_decomp_events(session);
+ if (err)
+ goto out_err;
+
if (!session_done())
goto more;
done:
@@ -1808,6 +1951,39 @@ fetch_mmaped_event(struct perf_session *session,
return event;
}
+static int __perf_session__process_decomp_events(struct perf_session *session)
+{
+ s64 skip;
+ u64 size, file_pos = 0;
+ struct decomp *decomp = session->decomp_last;
+
+ if (!decomp)
+ return 0;
+
+ while (decomp->head < decomp->size && !session_done()) {
+ union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
+
+ if (!event)
+ break;
+
+ size = event->header.size;
+
+ if (size < sizeof(struct perf_event_header) ||
+ (skip = perf_session__process_event(session, event, file_pos)) < 0) {
+ pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+ decomp->file_pos + decomp->head, event->header.size, event->header.type);
+ return -EINVAL;
+ }
+
+ if (skip)
+ size += skip;
+
+ decomp->head += size;
+ }
+
+ return 0;
+}
+
/*
* On 64bit we can mmap the data file in one go. No need for tiny mmap
* slices. On 32bit we use 32MB.
@@ -1820,38 +1996,42 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
-static int __perf_session__process_events(struct perf_session *session,
- u64 data_offset, u64 data_size,
- u64 file_size)
+struct reader;
+
+typedef s64 (*reader_cb_t)(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset);
+
+struct reader {
+ int fd;
+ u64 data_size;
+ u64 data_offset;
+ reader_cb_t process;
+};
+
+static int
+reader__process_events(struct reader *rd, struct perf_session *session,
+ struct ui_progress *prog)
{
- struct ordered_events *oe = &session->ordered_events;
- struct perf_tool *tool = session->tool;
- int fd = perf_data__fd(session->data);
+ u64 data_size = rd->data_size;
u64 head, page_offset, file_offset, file_pos, size;
- int err, mmap_prot, mmap_flags, map_idx = 0;
+ int err = 0, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
char *buf, *mmaps[NUM_MMAPS];
union perf_event *event;
- struct ui_progress prog;
s64 skip;
- perf_tool__fill_defaults(tool);
-
- page_offset = page_size * (data_offset / page_size);
+ page_offset = page_size * (rd->data_offset / page_size);
file_offset = page_offset;
- head = data_offset - page_offset;
-
- if (data_size == 0)
- goto out;
+ head = rd->data_offset - page_offset;
- if (data_offset + data_size < file_size)
- file_size = data_offset + data_size;
+ ui_progress__init_size(prog, data_size, "Processing events...");
- ui_progress__init_size(&prog, file_size, "Processing events...");
+ data_size += rd->data_offset;
mmap_size = MMAP_SIZE;
- if (mmap_size > file_size) {
- mmap_size = file_size;
+ if (mmap_size > data_size) {
+ mmap_size = data_size;
session->one_mmap = true;
}
@@ -1865,12 +2045,12 @@ static int __perf_session__process_events(struct perf_session *session,
mmap_flags = MAP_PRIVATE;
}
remap:
- buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
+ buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
file_offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_err;
+ goto out;
}
mmaps[map_idx] = buf;
map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
@@ -1896,13 +2076,15 @@ more:
size = event->header.size;
+ skip = -EINVAL;
+
if (size < sizeof(struct perf_event_header) ||
- (skip = perf_session__process_event(session, event, file_pos)) < 0) {
- pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
+ (skip = rd->process(session, event, file_pos)) < 0) {
+ pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
file_offset + head, event->header.size,
- event->header.type);
- err = -EINVAL;
- goto out_err;
+ event->header.type, strerror(-skip));
+ err = skip;
+ goto out;
}
if (skip)
@@ -1911,15 +2093,52 @@ more:
head += size;
file_pos += size;
- ui_progress__update(&prog, size);
+ err = __perf_session__process_decomp_events(session);
+ if (err)
+ goto out;
+
+ ui_progress__update(prog, size);
if (session_done())
goto out;
- if (file_pos < file_size)
+ if (file_pos < data_size)
goto more;
out:
+ return err;
+}
+
+static s64 process_simple(struct perf_session *session,
+ union perf_event *event,
+ u64 file_offset)
+{
+ return perf_session__process_event(session, event, file_offset);
+}
+
+static int __perf_session__process_events(struct perf_session *session)
+{
+ struct reader rd = {
+ .fd = perf_data__fd(session->data),
+ .data_size = session->header.data_size,
+ .data_offset = session->header.data_offset,
+ .process = process_simple,
+ };
+ struct ordered_events *oe = &session->ordered_events;
+ struct perf_tool *tool = session->tool;
+ struct ui_progress prog;
+ int err;
+
+ perf_tool__fill_defaults(tool);
+
+ if (rd.data_size == 0)
+ return -1;
+
+ ui_progress__init_size(&prog, rd.data_size, "Processing events...");
+
+ err = reader__process_events(&rd, session, &prog);
+ if (err)
+ goto out_err;
/* do the final flush for ordered samples */
err = ordered_events__flush(oe, OE_FLUSH__FINAL);
if (err)
@@ -1944,20 +2163,13 @@ out_err:
int perf_session__process_events(struct perf_session *session)
{
- u64 size = perf_data__size(session->data);
- int err;
-
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
- if (!perf_data__is_pipe(session->data))
- err = __perf_session__process_events(session,
- session->header.data_offset,
- session->header.data_size, size);
- else
- err = __perf_session__process_pipe_events(session);
+ if (perf_data__is_pipe(session->data))
+ return __perf_session__process_pipe_events(session);
- return err;
+ return __perf_session__process_events(session);
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
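The PERF_RECORD_COMPRESSED path above keeps decompressed event bytes in a chain of mmap'ed struct decomp buffers: any partial event left at the tail of the previous buffer (size - head bytes) is copied to the front of the new buffer before decompressing into it, and __perf_session__process_decomp_events() then consumes whole events by advancing head. A simplified sketch of that bookkeeping on a plain malloc'ed chain; decompress() and consume_one_event() are hypothetical stand-ins for zstd_decompress_stream() and perf_session__process_event().

#include <stdlib.h>
#include <string.h>

struct chunk {
	struct chunk *next;
	size_t head;	/* bytes already consumed */
	size_t size;	/* bytes of valid data */
	char data[];
};

/* Hypothetical helpers standing in for the perf internals. */
size_t decompress(void *dst, size_t dst_len);		/* bytes produced */
size_t consume_one_event(const char *p, size_t avail);	/* 0 if incomplete */

static struct chunk *append_chunk(struct chunk **tail, size_t buf_len)
{
	struct chunk *last = *tail;
	size_t carry = last ? last->size - last->head : 0;
	struct chunk *c = malloc(sizeof(*c) + buf_len);

	if (!c)
		return NULL;
	c->next = NULL;
	c->head = 0;
	c->size = carry;
	if (carry)	/* keep the partial event from the previous chunk */
		memcpy(c->data, last->data + last->head, carry);
	c->size += decompress(c->data + carry, buf_len - carry);
	if (last)
		last->next = c;
	*tail = c;
	return c;
}

static void process_chunk(struct chunk *c)
{
	while (c->head < c->size) {
		size_t n = consume_one_event(c->data + c->head, c->size - c->head);

		if (!n)		/* incomplete event, wait for the next chunk */
			break;
		c->head += n;
	}
}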
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index d96eccd7d27f..dd8920b745bc 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -8,6 +8,7 @@
#include "machine.h"
#include "data.h"
#include "ordered-events.h"
+#include "util/compress.h"
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/perf_event.h>
@@ -35,6 +36,19 @@ struct perf_session {
struct ordered_events ordered_events;
struct perf_data *data;
struct perf_tool *tool;
+ u64 bytes_transferred;
+ u64 bytes_compressed;
+ struct zstd_data zstd_data;
+ struct decomp *decomp;
+ struct decomp *decomp_last;
+};
+
+struct decomp {
+ struct decomp *next;
+ u64 file_pos;
+ u64 head;
+ size_t size;
+ char data[];
};
struct perf_tool;
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 64d1f36dee99..5b5a167b43ce 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
from os import getenv
from subprocess import Popen, PIPE
from re import sub
@@ -55,9 +53,14 @@ ext_sources = [f.strip() for f in open('util/python-ext-sources')
# use full paths with source files
ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources))
+extra_libraries = []
+if '-DHAVE_LIBNUMA_SUPPORT' in cflags:
+ extra_libraries = [ 'numa' ]
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
+ libraries = extra_libraries,
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6c1a83768eb0..5d2518e89fc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3,17 +3,21 @@
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
+#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
+#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
+#include "time-utils.h"
#include <linux/kernel.h>
regex_t parent_regex;
@@ -230,8 +234,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r)
return 0;
- if (sym_l->inlined || sym_r->inlined)
- return strcmp(sym_l->name, sym_r->name);
+ if (sym_l->inlined || sym_r->inlined) {
+ int ret = strcmp(sym_l->name, sym_r->name);
+
+ if (ret)
+ return ret;
+ if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
+ return 0;
+ }
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
@@ -428,8 +438,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
{
struct symbol *sym = he->ms.sym;
- struct map *map = he->ms.map;
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
double ipc = 0.0, coverage = 0.0;
char tmp[64];
@@ -437,11 +445,6 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
if (!sym)
return repsep_snprintf(bf, size, "%-*s", width, "-");
- if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
- &annotation__default_options, NULL) < 0) {
- return 0;
- }
-
notes = symbol__annotation(sym);
if (notes->hit_cycles)
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
.se_width_idx = HISTC_SOCKET,
};
+/* --sort time */
+
+static int64_t
+sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return right->time - left->time;
+}
+
+static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ unsigned long secs;
+ unsigned long long nsecs;
+ char he_time[32];
+
+ nsecs = he->time;
+ secs = nsecs / NSEC_PER_SEC;
+ nsecs -= secs * NSEC_PER_SEC;
+
+ if (symbol_conf.nanosecs)
+ snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
+ secs, nsecs);
+ else
+ timestamp__scnprintf_usec(he->time, he_time,
+ sizeof(he_time));
+
+ return repsep_snprintf(bf, size, "%-.*s", width, he_time);
+}
+
+struct sort_entry sort_time = {
+ .se_header = "Time",
+ .se_cmp = sort__time_cmp,
+ .se_snprintf = hist_entry__time_snprintf,
+ .se_width_idx = HISTC_TIME,
+};
+
/* --sort trace */
static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
+ DIM(SORT_TIME, "time", sort_time),
};
#undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
reset_dimensions();
perf_hpp__reset_output_field(&perf_hpp_list);
}
+
+#define INDENT (3*8 + 1)
+
+static void add_key(struct strbuf *sb, const char *str, int *llen)
+{
+ if (*llen >= 75) {
+ strbuf_addstr(sb, "\n\t\t\t ");
+ *llen = INDENT;
+ }
+ strbuf_addf(sb, " %s", str);
+ *llen += strlen(str) + 1;
+}
+
+static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
+ int *llen)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ add_key(sb, s[i].name, llen);
+}
+
+const char *sort_help(const char *prefix)
+{
+ struct strbuf sb;
+ char *s;
+ int len = strlen(prefix) + INDENT;
+
+ strbuf_init(&sb, 300);
+ strbuf_addstr(&sb, prefix);
+ add_hpp_sort_string(&sb, hpp_sort_dimensions,
+ ARRAY_SIZE(hpp_sort_dimensions), &len);
+ add_sort_string(&sb, common_sort_dimensions,
+ ARRAY_SIZE(common_sort_dimensions), &len);
+ add_sort_string(&sb, bstack_sort_dimensions,
+ ARRAY_SIZE(bstack_sort_dimensions), &len);
+ add_sort_string(&sb, memory_sort_dimensions,
+ ARRAY_SIZE(memory_sort_dimensions), &len);
+ s = strbuf_detach(&sb, NULL);
+ strbuf_release(&sb);
+ return s;
+}
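sort_help() builds a heap-allocated string listing every sort key, wrapped at roughly 75 columns and indented to line up under the given prefix. A hedged usage sketch; the caller shown here is illustrative and not part of this diff, and the returned string is owned by the caller.

#include <stdio.h>
#include <stdlib.h>
#include "sort.h"

static void print_sort_keys(FILE *out)
{
	const char *help = sort_help("sort by key(s):");

	fprintf(out, "%s\n", help);
	free((void *)help);
}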
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 130fe37fe2df..ce376a73f964 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -9,7 +9,8 @@
#include <linux/list.h>
#include "cache.h"
#include <linux/rbtree.h>
-#include "symbol.h"
+#include "map_symbol.h"
+#include "symbol_conf.h"
#include "string.h"
#include "callchain.h"
#include "values.h"
@@ -46,6 +47,12 @@ extern struct sort_entry sort_srcline;
extern enum sort_type sort__first_dimension;
extern const char default_mem_sort_order[];
+struct res_sample {
+ u64 time;
+ int cpu;
+ int tid;
+};
+
struct he_stat {
u64 period;
u64 period_sys;
@@ -134,10 +141,13 @@ struct hist_entry {
char *srcfile;
struct symbol *parent;
struct branch_info *branch_info;
+ long time;
struct hists *hists;
struct mem_info *mem_info;
void *raw_data;
u32 raw_size;
+ int num_res;
+ struct res_sample *res_samples;
void *trace_output;
struct perf_hpp_list *hpp_list;
struct hist_entry *parent_he;
@@ -145,8 +155,8 @@ struct hist_entry {
union {
/* this is for hierarchical entry structure */
struct {
- struct rb_root hroot_in;
- struct rb_root hroot_out;
+ struct rb_root_cached hroot_in;
+ struct rb_root_cached hroot_out;
}; /* non-leaf entries */
struct rb_root sorted_chain; /* leaf entry has callchains */
};
@@ -230,6 +240,7 @@ enum sort_type {
SORT_DSO_SIZE,
SORT_CGROUP_ID,
SORT_SYM_IPC_NULL,
+ SORT_TIME,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -285,6 +296,8 @@ void reset_output_field(void);
void sort__setup_elide(FILE *fp);
void perf_hpp__set_elide(int idx, bool elide);
+const char *sort_help(const char *prefix);
+
int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
bool is_strict_order(const char *order);
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index fcc8630f6dff..684b155c222a 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Manage printing of source lines
* Copyright (c) 2017, Intel Corporation.
* Author: Andi Kleen
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include "linux/list.h"
#include <stdlib.h>
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
index e500a746d5f1..1b5ed769779c 100644
--- a/tools/perf/util/srccode.h
+++ b/tools/perf/util/srccode.h
@@ -1,6 +1,19 @@
#ifndef SRCCODE_H
#define SRCCODE_H 1
+struct srccode_state {
+ char *srcfile;
+ unsigned line;
+};
+
+static inline void srccode_state_init(struct srccode_state *state)
+{
+ state->srcfile = NULL;
+ state->line = 0;
+}
+
+void srccode_state_free(struct srccode_state *state);
+
/* Result is not 0 terminated */
char *find_sourceline(char *fn, unsigned line, int *lenp);
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index dc86597d0cc4..10ca1533937e 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
} else {
/* create a fake symbol for the inline frame */
inline_sym = symbol__new(base_sym ? base_sym->start : 0,
- base_sym ? base_sym->end : 0,
+ base_sym ? (base_sym->end - base_sym->start) : 0,
base_sym ? base_sym->binding : 0,
base_sym ? base_sym->type : 0,
funcname);
@@ -594,11 +594,12 @@ struct srcline_node {
struct rb_node rb_node;
};
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
struct srcline_node *i, *node;
+ bool leftmost = true;
node = zalloc(sizeof(struct srcline_node));
if (!node) {
@@ -614,16 +615,18 @@ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
i = rb_entry(parent, struct srcline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, tree);
+ rb_insert_color_cached(&node->rb_node, tree, leftmost);
}
-char *srcline__tree_find(struct rb_root *tree, u64 addr)
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct srcline_node *i = rb_entry(n, struct srcline_node,
@@ -640,15 +643,15 @@ char *srcline__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void srcline__tree_delete(struct rb_root *tree)
+void srcline__tree_delete(struct rb_root_cached *tree)
{
struct srcline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct srcline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
free_srcline(pos->srcline);
zfree(&pos);
}
@@ -682,28 +685,32 @@ void inline_node__delete(struct inline_node *node)
free(node);
}
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines)
{
- struct rb_node **p = &tree->rb_node;
+ struct rb_node **p = &tree->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 addr = inlines->addr;
struct inline_node *i;
+ bool leftmost = true;
while (*p != NULL) {
parent = *p;
i = rb_entry(parent, struct inline_node, rb_node);
if (addr < i->addr)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&inlines->rb_node, parent, p);
- rb_insert_color(&inlines->rb_node, tree);
+ rb_insert_color_cached(&inlines->rb_node, tree, leftmost);
}
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr)
{
- struct rb_node *n = tree->rb_node;
+ struct rb_node *n = tree->rb_root.rb_node;
while (n) {
struct inline_node *i = rb_entry(n, struct inline_node,
@@ -720,15 +727,15 @@ struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
return NULL;
}
-void inlines__tree_delete(struct rb_root *tree)
+void inlines__tree_delete(struct rb_root_cached *tree)
{
struct inline_node *pos;
- struct rb_node *next = rb_first(tree);
+ struct rb_node *next = rb_first_cached(tree);
while (next) {
pos = rb_entry(next, struct inline_node, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, tree);
+ rb_erase_cached(&pos->rb_node, tree);
inline_node__delete(pos);
}
}
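The srcline and inline-node trees above, and the symbol trees later in this diff, all use the same rb_root_cached conversion: remember whether the new node stayed left of every existing node during the descent and pass that flag to rb_insert_color_cached(), so rb_first_cached() remains O(1). A generic sketch of the idiom against the tools rbtree API; struct item and its key are placeholders.

#include <stdbool.h>
#include <linux/rbtree.h>

struct item {
	struct rb_node rb_node;
	unsigned long long key;
};

static void item__tree_insert(struct rb_root_cached *tree, struct item *it)
{
	struct rb_node **p = &tree->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p != NULL) {
		struct item *i = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (it->key < i->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* no longer left of everything */
		}
	}
	rb_link_node(&it->rb_node, parent, p);
	rb_insert_color_cached(&it->rb_node, tree, leftmost);
}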
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 5762212dc342..b11a0aaaa676 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -19,11 +19,11 @@ void free_srcline(char *srcline);
char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
-void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
+void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline);
/* find previously inserted srcline */
-char *srcline__tree_find(struct rb_root *tree, u64 addr);
+char *srcline__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all srclines within the tree */
-void srcline__tree_delete(struct rb_root *tree);
+void srcline__tree_delete(struct rb_root_cached *tree);
#define SRCLINE_UNKNOWN ((char *) "??:0")
@@ -46,10 +46,11 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
void inline_node__delete(struct inline_node *node);
/* insert the inline node list into the DSO, which will take ownership */
-void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
+void inlines__tree_insert(struct rb_root_cached *tree,
+ struct inline_node *inlines);
/* find previously inserted inline node list */
-struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
+struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr);
/* delete all nodes within the tree of inline_node s */
-void inlines__tree_delete(struct rb_root *tree);
+void inlines__tree_delete(struct rb_root_cached *tree);
#endif /* PERF_SRCLINE_H */
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 665ee374fc01..4c53bae5644b 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -2,6 +2,7 @@
#include <inttypes.h>
#include <linux/time64.h>
#include <math.h>
+#include "color.h"
#include "evlist.h"
#include "evsel.h"
#include "stat.h"
@@ -17,11 +18,6 @@
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
-static bool is_duration_time(struct perf_evsel *evsel)
-{
- return !strcmp(evsel->name, "duration_time");
-}
-
static void print_running(struct perf_stat_config *config,
u64 run, u64 ena)
{
@@ -92,9 +88,17 @@ static void aggr_printout(struct perf_stat_config *config,
config->csv_sep);
break;
case AGGR_NONE:
- fprintf(config->output, "CPU%*d%s",
- config->csv_output ? 0 : -4,
- perf_evsel__cpus(evsel)->map[id], config->csv_sep);
+ if (evsel->percore) {
+ fprintf(config->output, "S%d-C%*d%s",
+ cpu_map__id_to_socket(id),
+ config->csv_output ? 0 : -5,
+ cpu_map__id_to_cpu(id), config->csv_sep);
+ } else {
+ fprintf(config->output, "CPU%*d%s ",
+ config->csv_output ? 0 : -5,
+ perf_evsel__cpus(evsel)->map[id],
+ config->csv_sep);
+ }
break;
case AGGR_THREAD:
fprintf(config->output, "%*s-%*d%s",
@@ -598,6 +602,41 @@ static void aggr_cb(struct perf_stat_config *config,
}
}
+static void print_counter_aggrdata(struct perf_stat_config *config,
+ struct perf_evsel *counter, int s,
+ char *prefix, bool metric_only,
+ bool *first)
+{
+ struct aggr_data ad;
+ FILE *output = config->output;
+ u64 ena, run, val;
+ int id, nr;
+ double uval;
+
+ ad.id = id = config->aggr_map->map[s];
+ ad.val = ad.ena = ad.run = 0;
+ ad.nr = 0;
+ if (!collect_data(config, counter, aggr_cb, &ad))
+ return;
+
+ nr = ad.nr;
+ ena = ad.ena;
+ run = ad.run;
+ val = ad.val;
+ if (*first && metric_only) {
+ *first = false;
+ aggr_printout(config, counter, id, nr);
+ }
+ if (prefix && !metric_only)
+ fprintf(output, "%s", prefix);
+
+ uval = val * counter->scale;
+ printout(config, id, nr, counter, uval, prefix,
+ run, ena, 1.0, &rt_stat);
+ if (!metric_only)
+ fputc('\n', output);
+}
+
static void print_aggr(struct perf_stat_config *config,
struct perf_evlist *evlist,
char *prefix)
@@ -605,9 +644,7 @@ static void print_aggr(struct perf_stat_config *config,
bool metric_only = config->metric_only;
FILE *output = config->output;
struct perf_evsel *counter;
- int s, id, nr;
- double uval;
- u64 ena, run, val;
+ int s;
bool first;
if (!(config->aggr_map || config->aggr_get_id))
@@ -620,36 +657,14 @@ static void print_aggr(struct perf_stat_config *config,
* Without each counter has its own line.
*/
for (s = 0; s < config->aggr_map->nr; s++) {
- struct aggr_data ad;
if (prefix && metric_only)
fprintf(output, "%s", prefix);
- ad.id = id = config->aggr_map->map[s];
first = true;
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
-
- ad.val = ad.ena = ad.run = 0;
- ad.nr = 0;
- if (!collect_data(config, counter, aggr_cb, &ad))
- continue;
- nr = ad.nr;
- ena = ad.ena;
- run = ad.run;
- val = ad.val;
- if (first && metric_only) {
- first = false;
- aggr_printout(config, counter, id, nr);
- }
- if (prefix && !metric_only)
- fprintf(output, "%s", prefix);
-
- uval = val * counter->scale;
- printout(config, id, nr, counter, uval, prefix,
- run, ena, 1.0, &rt_stat);
- if (!metric_only)
- fputc('\n', output);
+ print_counter_aggrdata(config, counter, s,
+ prefix, metric_only,
+ &first);
}
if (metric_only)
fputc('\n', output);
@@ -847,8 +862,6 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
if (prefix)
fputs(prefix, config->output);
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
if (first) {
aggr_printout(config, counter, cpu, 0);
first = false;
@@ -905,8 +918,6 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
os.evsel = counter;
out.ctx = &os;
out.print_metric = print_metric_header;
@@ -1100,6 +1111,30 @@ static void print_footer(struct perf_stat_config *config)
"the same PMU. Try reorganizing the group.\n");
}
+static void print_percore(struct perf_stat_config *config,
+ struct perf_evsel *counter, char *prefix)
+{
+ bool metric_only = config->metric_only;
+ FILE *output = config->output;
+ int s;
+ bool first = true;
+
+ if (!(config->aggr_map || config->aggr_get_id))
+ return;
+
+ for (s = 0; s < config->aggr_map->nr; s++) {
+ if (prefix && metric_only)
+ fprintf(output, "%s", prefix);
+
+ print_counter_aggrdata(config, counter, s,
+ prefix, metric_only,
+ &first);
+ }
+
+ if (metric_only)
+ fputc('\n', output);
+}
+
void
perf_evlist__print_counters(struct perf_evlist *evlist,
struct perf_stat_config *config,
@@ -1135,15 +1170,11 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
break;
case AGGR_THREAD:
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
print_aggr_thread(config, _target, counter, prefix);
}
break;
case AGGR_GLOBAL:
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
print_counter_aggr(config, counter, prefix);
}
if (metric_only)
@@ -1154,9 +1185,10 @@ perf_evlist__print_counters(struct perf_evlist *evlist,
print_no_aggr_metric(config, evlist, prefix);
else {
evlist__for_each_entry(evlist, counter) {
- if (is_duration_time(counter))
- continue;
- print_counter(config, counter, prefix);
+ if (counter->percore)
+ print_percore(config, counter, prefix);
+ else
+ print_counter(config, counter, prefix);
}
}
break;
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3c22c58b3e90..83d8094be4fe 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -168,7 +168,7 @@ static void reset_stat(struct runtime_stat *st)
struct rb_node *pos, *next;
rblist = &st->value_list;
- next = rb_first(&rblist->entries);
+ next = rb_first_cached(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d40515307b8..c3115d939b0b 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -277,9 +277,11 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
if (!evsel->snapshot)
perf_evsel__compute_deltas(evsel, cpu, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
- if (config->aggr_mode == AGGR_NONE)
- perf_stat__update_shadow_stats(evsel, count->val, cpu,
- &rt_stat);
+ if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
+ perf_stat__update_shadow_stats(evsel, count->val,
+ cpu, &rt_stat);
+ }
+
if (config->aggr_mode == AGGR_THREAD) {
if (config->stats)
perf_stat__update_shadow_stats(evsel,
@@ -291,10 +293,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
break;
case AGGR_GLOBAL:
aggr->val += count->val;
- if (config->scale) {
- aggr->ena += count->ena;
- aggr->run += count->run;
- }
+ aggr->ena += count->ena;
+ aggr->run += count->run;
case AGGR_UNSET:
default:
break;
@@ -442,10 +442,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_event_attr *attr = &evsel->attr;
struct perf_evsel *leader = evsel->leader;
- if (config->scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
/*
* The event is part of non trivial group, let's enable
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 9de5434bb49e..af45c6fd97db 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Licensed under the GPLv2.
*/
#include "strlist.h"
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index d58f1e08b170..7e82c71dcc42 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -57,7 +57,7 @@ static inline unsigned int strlist__nr_entries(const struct strlist *slist)
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *slist)
{
- struct rb_node *rn = rb_first(&slist->rblist.entries);
+ struct rb_node *rn = rb_first_cached(&slist->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index f735ee038713..fab8a048d31b 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* svghelper.c - helper functions for outputting svg
*
@@ -5,11 +6,6 @@
*
* Authors:
* Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#include <inttypes.h>
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index dca7dfae69ad..4ad106a5f2c0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -6,6 +6,8 @@
#include <unistd.h>
#include <inttypes.h>
+#include "map.h"
+#include "map_groups.h"
#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7119df77dc0b..17edbd4f6f85 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -3,6 +3,7 @@
#include "util.h"
#include <errno.h>
+#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <string.h>
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48efad6d0f90..5cbad55cd99d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -6,6 +6,7 @@
#include <string.h>
#include <linux/kernel.h>
#include <linux/mman.h>
+#include <linux/time64.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
@@ -17,6 +18,7 @@
#include "util.h"
#include "debug.h"
#include "machine.h"
+#include "map.h"
#include "symbol.h"
#include "strlist.h"
#include "intlist.h"
@@ -38,15 +40,18 @@ int vmlinux_path__nr_entries;
char **vmlinux_path;
struct symbol_conf symbol_conf = {
+ .nanosecs = false,
.use_modules = true,
.try_vmlinux_path = true,
.demangle = true,
.demangle_kernel = false,
.cumulate_callchain = true,
+ .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
.show_hist_headers = true,
.symfs = "",
.event_group = true,
.inline_name = true,
+ .res_sample = 0,
};
static enum dso_binary_type binary_type_symtab[] = {
@@ -163,7 +168,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
return arch__choose_best_symbol(syma, symb);
}
-void symbols__fixup_duplicate(struct rb_root *symbols)
+void symbols__fixup_duplicate(struct rb_root_cached *symbols)
{
struct rb_node *nd;
struct symbol *curr, *next;
@@ -171,7 +176,7 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
if (symbol_conf.allow_aliases)
return;
- nd = rb_first(symbols);
+ nd = rb_first_cached(symbols);
while (nd) {
curr = rb_entry(nd, struct symbol, rb_node);
@@ -186,20 +191,20 @@ again:
continue;
if (choose_best_symbol(curr, next) == SYMBOL_A) {
- rb_erase(&next->rb_node, symbols);
+ rb_erase_cached(&next->rb_node, symbols);
symbol__delete(next);
goto again;
} else {
nd = rb_next(&curr->rb_node);
- rb_erase(&curr->rb_node, symbols);
+ rb_erase_cached(&curr->rb_node, symbols);
symbol__delete(curr);
}
}
}
-void symbols__fixup_end(struct rb_root *symbols)
+void symbols__fixup_end(struct rb_root_cached *symbols)
{
- struct rb_node *nd, *prevnd = rb_first(symbols);
+ struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
if (prevnd == NULL)
@@ -282,25 +287,27 @@ void symbol__delete(struct symbol *sym)
free(((void *)sym) - symbol_conf.priv_size);
}
-void symbols__delete(struct rb_root *symbols)
+void symbols__delete(struct rb_root_cached *symbols)
{
struct symbol *pos;
- struct rb_node *next = rb_first(symbols);
+ struct rb_node *next = rb_first_cached(symbols);
while (next) {
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase(&pos->rb_node, symbols);
+ rb_erase_cached(&pos->rb_node, symbols);
symbol__delete(pos);
}
}
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
+void __symbols__insert(struct rb_root_cached *symbols,
+ struct symbol *sym, bool kernel)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
const u64 ip = sym->start;
struct symbol *s;
+ bool leftmost = true;
if (kernel) {
const char *name = sym->name;
@@ -318,26 +325,28 @@ void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
s = rb_entry(parent, struct symbol, rb_node);
if (ip < s->start)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&sym->rb_node, parent, p);
- rb_insert_color(&sym->rb_node, symbols);
+ rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
}
-void symbols__insert(struct rb_root *symbols, struct symbol *sym)
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
{
__symbols__insert(symbols, sym, false);
}
-static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
+static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
{
struct rb_node *n;
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
struct symbol *s = rb_entry(n, struct symbol, rb_node);
@@ -353,9 +362,9 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
-static struct symbol *symbols__first(struct rb_root *symbols)
+static struct symbol *symbols__first(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_first(symbols);
+ struct rb_node *n = rb_first_cached(symbols);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -363,9 +372,9 @@ static struct symbol *symbols__first(struct rb_root *symbols)
return NULL;
}
-static struct symbol *symbols__last(struct rb_root *symbols)
+static struct symbol *symbols__last(struct rb_root_cached *symbols)
{
- struct rb_node *n = rb_last(symbols);
+ struct rb_node *n = rb_last(&symbols->rb_root);
if (n)
return rb_entry(n, struct symbol, rb_node);
@@ -383,11 +392,12 @@ static struct symbol *symbols__next(struct symbol *sym)
return NULL;
}
-static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
+static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym)
{
- struct rb_node **p = &symbols->rb_node;
+ struct rb_node **p = &symbols->rb_root.rb_node;
struct rb_node *parent = NULL;
struct symbol_name_rb_node *symn, *s;
+ bool leftmost = true;
symn = container_of(sym, struct symbol_name_rb_node, sym);
@@ -396,19 +406,21 @@ static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
if (strcmp(sym->name, s->sym.name) < 0)
p = &(*p)->rb_left;
- else
+ else {
p = &(*p)->rb_right;
+ leftmost = false;
+ }
}
rb_link_node(&symn->rb_node, parent, p);
- rb_insert_color(&symn->rb_node, symbols);
+ rb_insert_color_cached(&symn->rb_node, symbols, leftmost);
}
-static void symbols__sort_by_name(struct rb_root *symbols,
- struct rb_root *source)
+static void symbols__sort_by_name(struct rb_root_cached *symbols,
+ struct rb_root_cached *source)
{
struct rb_node *nd;
- for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
symbols__insert_by_name(symbols, pos);
}
@@ -431,7 +443,7 @@ int symbol__match_symbol_name(const char *name, const char *str,
return arch__compare_symbol_names(name, str);
}
-static struct symbol *symbols__find_by_name(struct rb_root *symbols,
+static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols,
const char *name,
enum symbol_tag_include includes)
{
@@ -441,7 +453,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
if (symbols == NULL)
return NULL;
- n = symbols->rb_node;
+ n = symbols->rb_root.rb_node;
while (n) {
int cmp;
@@ -644,7 +656,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root *root = &dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
if (!symbol_type__filter(type))
return 0;
@@ -681,14 +693,14 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
struct map *curr_map;
struct symbol *pos;
int count = 0;
- struct rb_root old_root = dso->symbols;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached old_root = dso->symbols;
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
if (!kmaps)
return -1;
- *root = RB_ROOT;
+ *root = RB_ROOT_CACHED;
while (next) {
char *module;
@@ -696,8 +708,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
pos = rb_entry(next, struct symbol, rb_node);
next = rb_next(&pos->rb_node);
- rb_erase_init(&pos->rb_node, &old_root);
-
+ rb_erase_cached(&pos->rb_node, &old_root);
+ RB_CLEAR_NODE(&pos->rb_node);
module = strchr(pos->name, '\t');
if (module)
*module = '\0';
@@ -710,6 +722,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
}
pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end > curr_map->end)
+ pos->end = curr_map->end;
if (pos->end)
pos->end -= curr_map->start - curr_map->pgoff;
symbols__insert(&curr_map->dso->symbols, pos);
@@ -734,8 +748,8 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
struct map *curr_map = initial_map;
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root *root = &dso->symbols;
- struct rb_node *next = rb_first(root);
+ struct rb_root_cached *root = &dso->symbols;
+ struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -849,7 +863,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
add_symbol:
if (curr_map != initial_map) {
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbols__insert(&curr_map->dso->symbols, pos);
++moved;
} else
@@ -857,7 +871,7 @@ add_symbol:
continue;
discard_symbol:
- rb_erase(&pos->rb_node, root);
+ rb_erase_cached(&pos->rb_node, root);
symbol__delete(pos);
}
@@ -1441,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
return true;
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
case DSO_BINARY_TYPE__NOT_FOUND:
default:
return false;
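
The symbol.c hunks above convert the per-DSO symbol trees from rb_root to rb_root_cached, so the lowest-address node is tracked at insertion time and rb_first_cached() becomes an O(1) lookup instead of a walk down the left spine. A minimal sketch of the same pattern, using only the rbtree calls exercised by the patch; the 'struct item' container and helper names are illustrative, not part of the patch:

#include <stdbool.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rb_node;
	u64 key;
};

static void item__insert(struct rb_root_cached *root, struct item *it)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (it->key < cur->key) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;	/* new node is not the minimum */
		}
	}

	rb_link_node(&it->rb_node, parent, p);
	/* also updates rb_leftmost, keeping rb_first_cached() O(1) */
	rb_insert_color_cached(&it->rb_node, root, leftmost);
}

static struct item *item__first(struct rb_root_cached *root)
{
	struct rb_node *n = rb_first_cached(root);

	return n ? rb_entry(n, struct item, rb_node) : NULL;
}

Trees start out as RB_ROOT_CACHED and nodes are removed with rb_erase_cached(), exactly as the converted symbols__*() helpers above do.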
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 14d9d438e7e2..9a8fe012910a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -5,16 +5,13 @@
#include <linux/types.h>
#include <stdbool.h>
#include <stdint.h>
-#include "map.h"
-#include "../perf.h"
#include <linux/list.h>
#include <linux/rbtree.h>
#include <stdio.h>
-#include <byteswap.h>
-#include <libgen.h>
-#include "build-id.h"
-#include "event.h"
+#include "map_symbol.h"
+#include "branch.h"
#include "path.h"
+#include "symbol_conf.h"
#ifdef HAVE_LIBELF_SUPPORT
#include <libelf.h>
@@ -24,6 +21,10 @@
#include "dso.h"
+struct map;
+struct map_groups;
+struct option;
+
/*
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
@@ -68,7 +69,7 @@ struct symbol {
};
void symbol__delete(struct symbol *sym);
-void symbols__delete(struct rb_root *symbols);
+void symbols__delete(struct rb_root_cached *symbols);
/* symbols__for_each_entry - iterate over symbols (rb_root)
*
@@ -77,7 +78,7 @@ void symbols__delete(struct rb_root *symbols);
* @nd: the 'struct rb_node *' to use as a temporary storage
*/
#define symbols__for_each_entry(symbols, pos, nd) \
- for (nd = rb_first(symbols); \
+ for (nd = rb_first_cached(symbols); \
nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
nd = rb_next(nd))
@@ -89,69 +90,6 @@ static inline size_t symbol__size(const struct symbol *sym)
struct strlist;
struct intlist;
-struct symbol_conf {
- unsigned short priv_size;
- bool try_vmlinux_path,
- init_annotation,
- force,
- ignore_vmlinux,
- ignore_vmlinux_buildid,
- show_kernel_path,
- use_modules,
- allow_aliases,
- sort_by_name,
- show_nr_samples,
- show_total_period,
- use_callchain,
- cumulate_callchain,
- show_branchflag_count,
- exclude_other,
- show_cpu_utilization,
- initialized,
- kptr_restrict,
- event_group,
- demangle,
- demangle_kernel,
- filter_relative,
- show_hist_headers,
- branch_callstack,
- has_filter,
- show_ref_callgraph,
- hide_unresolved,
- raw_trace,
- report_hierarchy,
- inline_name;
- const char *vmlinux_name,
- *kallsyms_name,
- *source_prefix,
- *field_sep,
- *graph_function;
- const char *default_guest_vmlinux_name,
- *default_guest_kallsyms,
- *default_guest_modules;
- const char *guestmount;
- const char *dso_list_str,
- *comm_list_str,
- *pid_list_str,
- *tid_list_str,
- *sym_list_str,
- *col_width_list_str,
- *bt_stop_list_str;
- struct strlist *dso_list,
- *comm_list,
- *sym_list,
- *dso_from_list,
- *dso_to_list,
- *sym_from_list,
- *sym_to_list,
- *bt_stop_list;
- struct intlist *pid_list,
- *tid_list;
- const char *symfs;
-};
-
-extern struct symbol_conf symbol_conf;
-
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -178,19 +116,6 @@ struct ref_reloc_sym {
u64 unrelocated_addr;
};
-struct map_symbol {
- struct map *map;
- struct symbol *sym;
-};
-
-struct addr_map_symbol {
- struct map *map;
- struct symbol *sym;
- u64 addr;
- u64 al_addr;
- u64 phys_addr;
-};
-
struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
@@ -310,10 +235,11 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss);
char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name);
-void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel);
-void symbols__insert(struct rb_root *symbols, struct symbol *sym);
-void symbols__fixup_duplicate(struct rb_root *symbols);
-void symbols__fixup_end(struct rb_root *symbols);
+void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
+ bool kernel);
+void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
+void symbols__fixup_duplicate(struct rb_root_cached *symbols);
+void symbols__fixup_end(struct rb_root_cached *symbols);
void map_groups__fixup_end(struct map_groups *mg);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
new file mode 100644
index 000000000000..6c55fa6fccec
--- /dev/null
+++ b/tools/perf/util/symbol_conf.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_SYMBOL_CONF
+#define __PERF_SYMBOL_CONF 1
+
+#include <stdbool.h>
+
+struct strlist;
+struct intlist;
+
+struct symbol_conf {
+ bool nanosecs;
+ unsigned short priv_size;
+ bool try_vmlinux_path,
+ init_annotation,
+ force,
+ ignore_vmlinux,
+ ignore_vmlinux_buildid,
+ show_kernel_path,
+ use_modules,
+ allow_aliases,
+ sort_by_name,
+ show_nr_samples,
+ show_total_period,
+ use_callchain,
+ cumulate_callchain,
+ show_branchflag_count,
+ exclude_other,
+ show_cpu_utilization,
+ initialized,
+ kptr_restrict,
+ event_group,
+ demangle,
+ demangle_kernel,
+ filter_relative,
+ show_hist_headers,
+ branch_callstack,
+ has_filter,
+ show_ref_callgraph,
+ hide_unresolved,
+ raw_trace,
+ report_hierarchy,
+ inline_name;
+ const char *vmlinux_name,
+ *kallsyms_name,
+ *source_prefix,
+ *field_sep,
+ *graph_function;
+ const char *default_guest_vmlinux_name,
+ *default_guest_kallsyms,
+ *default_guest_modules;
+ const char *guestmount;
+ const char *dso_list_str,
+ *comm_list_str,
+ *pid_list_str,
+ *tid_list_str,
+ *sym_list_str,
+ *col_width_list_str,
+ *bt_stop_list_str;
+ unsigned long time_quantum;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list,
+ *dso_from_list,
+ *dso_to_list,
+ *sym_from_list,
+ *sym_to_list,
+ *bt_stop_list;
+ struct intlist *pid_list,
+ *tid_list;
+ const char *symfs;
+ int res_sample;
+};
+
+extern struct symbol_conf symbol_conf;
+
+#endif // __PERF_SYMBOL_CONF
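
The new header intentionally carries only struct symbol_conf plus forward declarations of struct strlist and struct intlist, so a file that merely reads configuration flags no longer pulls in map.h, build-id.h and event.h through symbol.h. A hedged sketch of the consumer side (the file and function names are made up for illustration):

/* hypothetical user that only needs the flags */
#include "symbol_conf.h"

static bool want_demangling(void)
{
	return symbol_conf.demangle || symbol_conf.demangle_kernel;
}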
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index ed0205cc7942..02e89b02c2ce 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -3,6 +3,7 @@
#include <inttypes.h>
#include <stdio.h>
+#include "map.h"
#include "symbol.h"
size_t symbol__fprintf(struct symbol *sym, FILE *fp)
@@ -64,7 +65,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
struct rb_node *nd;
struct symbol_name_rb_node *pos;
- for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
fprintf(fp, "%s\n", pos->sym.name);
}
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 3393d7ee9401..c2037ac533f3 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* System call table mapper
*
* (C) 2016 Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include "syscalltbl.h"
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 21c4d9b23c24..3852d07c49bd 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Helper functions for handling target threads/cpus
*
* Copyright (C) 2012, LG Electronics, Namhyung Kim <namhyung.kim@lge.com>
- *
- * Released under the GPL v2.
*/
#include "target.h"
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index d52f27f373ce..60c9d955c4d7 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* thread-stack.c: Synthesize a thread's stack using call / return events
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/rbtree.h>
@@ -20,6 +11,7 @@
#include "thread.h"
#include "event.h"
#include "machine.h"
+#include "env.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
@@ -29,24 +21,41 @@
#define STACK_GROWTH 2048
+/*
+ * State of retpoline detection.
+ *
+ * RETPOLINE_NONE: no retpoline detection
+ * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
+ * X86_RETPOLINE_DETECTED: x86 retpoline detected
+ */
+enum retpoline_state_t {
+ RETPOLINE_NONE,
+ X86_RETPOLINE_POSSIBLE,
+ X86_RETPOLINE_DETECTED,
+};
+
/**
* struct thread_stack_entry - thread stack entry.
* @ret_addr: return address
* @timestamp: timestamp (if known)
* @ref: external reference (e.g. db_id of sample)
* @branch_count: the branch count when the entry was created
+ * @db_id: id used for db-export
* @cp: call path
* @no_call: a 'call' was not seen
* @trace_end: a 'call' but trace ended
+ * @non_call: a branch but not a 'call' to the start of a different symbol
*/
struct thread_stack_entry {
u64 ret_addr;
u64 timestamp;
u64 ref;
u64 branch_count;
+ u64 db_id;
struct call_path *cp;
bool no_call;
bool trace_end;
+ bool non_call;
};
/**
@@ -62,6 +71,7 @@ struct thread_stack_entry {
* @crp: call/return processor
* @comm: current comm
* @arr_sz: size of array if this is the first element of an array
+ * @rstate: used to detect retpolines
*/
struct thread_stack {
struct thread_stack_entry *stack;
@@ -74,6 +84,7 @@ struct thread_stack {
struct call_return_processor *crp;
struct comm *comm;
unsigned int arr_sz;
+ enum retpoline_state_t rstate;
};
/*
@@ -113,10 +124,16 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
if (err)
return err;
- if (thread->mg && thread->mg->machine)
- ts->kernel_start = machine__kernel_start(thread->mg->machine);
- else
+ if (thread->mg && thread->mg->machine) {
+ struct machine *machine = thread->mg->machine;
+ const char *arch = perf_env__arch(machine->env);
+
+ ts->kernel_start = machine__kernel_start(machine);
+ if (!strcmp(arch, "x86"))
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+ } else {
ts->kernel_start = 1ULL << 63;
+ }
ts->crp = crp;
return 0;
@@ -256,20 +273,31 @@ static int thread_stack__call_return(struct thread *thread,
.comm = ts->comm,
.db_id = 0,
};
+ u64 *parent_db_id;
tse = &ts->stack[idx];
cr.cp = tse->cp;
cr.call_time = tse->timestamp;
cr.return_time = timestamp;
cr.branch_count = ts->branch_count - tse->branch_count;
+ cr.db_id = tse->db_id;
cr.call_ref = tse->ref;
cr.return_ref = ref;
if (tse->no_call)
cr.flags |= CALL_RETURN_NO_CALL;
if (no_return)
cr.flags |= CALL_RETURN_NO_RETURN;
+ if (tse->non_call)
+ cr.flags |= CALL_RETURN_NON_CALL;
- return crp->process(&cr, crp->data);
+ /*
+ * The parent db_id must be assigned before exporting the child. Note
+ * it is not possible to export the parent first: its information is
+ * not complete until its 'return' has been processed.
+ */
+ parent_db_id = idx ? &(tse - 1)->db_id : NULL;
+
+ return crp->process(&cr, parent_db_id, crp->data);
}
static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
@@ -458,7 +486,7 @@ void thread_stack__sample(struct thread *thread, int cpu,
}
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data)
{
struct call_return_processor *crp;
@@ -493,6 +521,9 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
struct thread_stack_entry *tse;
int err;
+ if (!cp)
+ return -ENOMEM;
+
if (ts->cnt == ts->sz) {
err = thread_stack__grow(ts);
if (err)
@@ -507,6 +538,8 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
tse->cp = cp;
tse->no_call = no_call;
tse->trace_end = trace_end;
+ tse->non_call = false;
+ tse->db_id = 0;
return 0;
}
@@ -528,14 +561,16 @@ static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
timestamp, ref, false);
}
- if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
+ if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
+ !ts->stack[ts->cnt - 1].non_call) {
return thread_stack__call_return(thread, ts, --ts->cnt,
timestamp, ref, false);
} else {
size_t i = ts->cnt - 1;
while (i--) {
- if (ts->stack[i].ret_addr != ret_addr)
+ if (ts->stack[i].ret_addr != ret_addr ||
+ ts->stack[i].non_call)
continue;
i += 1;
while (ts->cnt > i) {
@@ -576,13 +611,28 @@ static int thread_stack__bottom(struct thread_stack *ts,
cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
true, false);
}
+static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
+ struct perf_sample *sample, u64 ref)
+{
+ u64 tm = sample->time;
+ int err;
+
+ /* Return to userspace, so pop all kernel addresses */
+ while (thread_stack__in_kernel(ts)) {
+ err = thread_stack__call_return(thread, ts, --ts->cnt,
+ tm, ref, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int thread_stack__no_call_return(struct thread *thread,
struct thread_stack *ts,
struct perf_sample *sample,
@@ -590,36 +640,36 @@ static int thread_stack__no_call_return(struct thread *thread,
struct addr_location *to_al, u64 ref)
{
struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *root = &cpr->call_path;
+ struct symbol *fsym = from_al->sym;
+ struct symbol *tsym = to_al->sym;
struct call_path *cp, *parent;
u64 ks = ts->kernel_start;
+ u64 addr = sample->addr;
+ u64 tm = sample->time;
+ u64 ip = sample->ip;
int err;
- if (sample->ip >= ks && sample->addr < ks) {
+ if (ip >= ks && addr < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
/* If the stack is empty, push the userspace address */
if (!ts->cnt) {
- cp = call_path__findnew(cpr, &cpr->call_path,
- to_al->sym, sample->addr,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
- return thread_stack__push_cp(ts, 0, sample->time, ref,
- cp, true, false);
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+ return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
+ false);
}
- } else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
+ } else if (thread_stack__in_kernel(ts) && ip < ks) {
/* Return to userspace, so pop all kernel addresses */
while (thread_stack__in_kernel(ts)) {
err = thread_stack__call_return(thread, ts, --ts->cnt,
- sample->time, ref,
- true);
+ tm, ref, true);
if (err)
return err;
}
@@ -628,21 +678,59 @@ static int thread_stack__no_call_return(struct thread *thread,
if (ts->cnt)
parent = ts->stack[ts->cnt - 1].cp;
else
- parent = &cpr->call_path;
+ parent = root;
- /* This 'return' had no 'call', so push and pop top of stack */
- cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
- ts->kernel_start);
- if (!cp)
- return -ENOMEM;
+ if (parent->sym == from_al->sym) {
+ /*
+ * At the bottom of the stack, assume the missing 'call' was
+ * before the trace started. So, pop the current symbol and push
+ * the 'to' symbol.
+ */
+ if (ts->cnt == 1) {
+ err = thread_stack__call_return(thread, ts, --ts->cnt,
+ tm, ref, false);
+ if (err)
+ return err;
+ }
+
+ if (!ts->cnt) {
+ cp = call_path__findnew(cpr, root, tsym, addr, ks);
+
+ return thread_stack__push_cp(ts, addr, tm, ref, cp,
+ true, false);
+ }
+
+ /*
+ * Otherwise assume the 'return' is being used as a jump (e.g.
+ * retpoline) and just push the 'to' symbol.
+ */
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
+
+ return err;
+ }
+
+ /*
+ * Assume 'parent' has not yet returned, so push 'to', and then push and
+ * pop 'from'.
+ */
- err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
- true, false);
+ cp = call_path__findnew(cpr, parent, tsym, addr, ks);
+
+ err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
if (err)
return err;
- return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
- to_al->sym);
+ cp = call_path__findnew(cpr, cp, fsym, ip, ks);
+
+ err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
+ if (err)
+ return err;
+
+ return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}
static int thread_stack__trace_begin(struct thread *thread,
@@ -680,8 +768,6 @@ static int thread_stack__trace_end(struct thread_stack *ts,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
ret_addr = sample->ip + sample->insn_len;
@@ -689,6 +775,70 @@ static int thread_stack__trace_end(struct thread_stack *ts,
false, true);
}
+static bool is_x86_retpoline(const char *name)
+{
+ const char *p = strstr(name, "__x86_indirect_thunk_");
+
+ return p == name || !strcmp(name, "__indirect_thunk_start");
+}
+
+/*
+ * x86 retpoline functions pollute the call graph. This function removes them.
+ * This does not handle function return thunks, nor is there any improvement
+ * for the handling of inline thunks or extern thunks.
+ */
+static int thread_stack__x86_retpoline(struct thread_stack *ts,
+ struct perf_sample *sample,
+ struct addr_location *to_al)
+{
+ struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct symbol *sym = tse->cp->sym;
+ struct symbol *tsym = to_al->sym;
+ struct call_path *cp;
+
+ if (sym && is_x86_retpoline(sym->name)) {
+ /*
+ * This is an x86 retpoline fn. It pollutes the call graph by
+ * showing up everywhere there is an indirect branch, but does
+ * not itself mean anything. Here the top-of-stack is removed,
+ * by decrementing the stack count, and then further down, the
+ * resulting top-of-stack is replaced with the actual target.
+ * The result is that the retpoline functions will no longer
+ * appear in the call graph. Note this only affects the call
+ * graph, since all the original branches are left unchanged.
+ */
+ ts->cnt -= 1;
+ sym = ts->stack[ts->cnt - 2].cp->sym;
+ if (sym && sym == tsym && to_al->addr != tsym->start) {
+ /*
+ * Target is back to the middle of the symbol we came
+ * from so assume it is an indirect jmp and forget it
+ * altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+ } else if (sym && sym == tsym) {
+ /*
+ * Target is back to the symbol we came from so assume it is an
+ * indirect jmp and forget it altogether.
+ */
+ ts->cnt -= 1;
+ return 0;
+ }
+
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
+ sample->addr, ts->kernel_start);
+ if (!cp)
+ return -ENOMEM;
+
+ /* Replace the top-of-stack with the actual target */
+ ts->stack[ts->cnt - 1].cp = cp;
+
+ return 0;
+}
+
int thread_stack__process(struct thread *thread, struct comm *comm,
struct perf_sample *sample,
struct addr_location *from_al,
@@ -696,6 +846,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
struct call_return_processor *crp)
{
struct thread_stack *ts = thread__stack(thread, sample->cpu);
+ enum retpoline_state_t rstate;
int err = 0;
if (ts && !ts->crp) {
@@ -711,6 +862,10 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
ts->comm = comm;
}
+ rstate = ts->rstate;
+ if (rstate == X86_RETPOLINE_DETECTED)
+ ts->rstate = X86_RETPOLINE_POSSIBLE;
+
/* Flush stack on exec */
if (ts->comm != comm && thread->pid_ == thread->tid) {
err = __thread_stack__flush(thread, ts);
@@ -745,14 +900,38 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
to_al->sym, sample->addr,
ts->kernel_start);
- if (!cp)
- return -ENOMEM;
err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
cp, false, trace_end);
+
+ /*
+ * A call to the same symbol but not the start of the symbol,
+ * may be the start of a x86 retpoline.
+ */
+ if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
+ from_al->sym == to_al->sym &&
+ to_al->addr != to_al->sym->start)
+ ts->rstate = X86_RETPOLINE_DETECTED;
+
} else if (sample->flags & PERF_IP_FLAG_RETURN) {
- if (!sample->ip || !sample->addr)
+ if (!sample->addr) {
+ u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
+ PERF_IP_FLAG_INTERRUPT;
+
+ if (!(sample->flags & return_from_kernel))
+ return 0;
+
+ /* Pop kernel stack */
+ return thread_stack__pop_ks(thread, ts, sample, ref);
+ }
+
+ if (!sample->ip)
return 0;
+ /* x86 retpoline 'return' doesn't match the stack */
+ if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
+ ts->stack[ts->cnt - 1].ret_addr != sample->addr)
+ return thread_stack__x86_retpoline(ts, sample, to_al);
+
err = thread_stack__pop_cp(thread, ts, sample->addr,
sample->time, ref, from_al->sym);
if (err) {
@@ -765,6 +944,25 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
err = thread_stack__trace_begin(thread, ts, sample->time, ref);
} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
err = thread_stack__trace_end(ts, sample, ref);
+ } else if (sample->flags & PERF_IP_FLAG_BRANCH &&
+ from_al->sym != to_al->sym && to_al->sym &&
+ to_al->addr == to_al->sym->start) {
+ struct call_path_root *cpr = ts->crp->cpr;
+ struct call_path *cp;
+
+ /*
+ * The compiler might optimize a call/ret combination by making
+ * it a jmp. Make that visible by recording on the stack a
+ * branch to the start of a different symbol. Note, that means
+ * when a ret pops the stack, all jmps must be popped off first.
+ */
+ cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
+ to_al->sym, sample->addr,
+ ts->kernel_start);
+ err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
+ false);
+ if (!err)
+ ts->stack[ts->cnt - 1].non_call = true;
}
return err;
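
The thread-stack.c changes above add a small per-stack state machine for x86 retpolines: a stack starts out as X86_RETPOLINE_POSSIBLE on x86, a call landing in the middle of the caller's own symbol moves it to X86_RETPOLINE_DETECTED, and that state only lives until the next sample, where a 'return' that does not match the top of stack is handed to thread_stack__x86_retpoline(). An illustrative restatement of just the transitions, as standalone helpers that are not part of the patch:

#include <stdbool.h>

enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};

/* a call into the middle of the caller's own symbol looks like a retpoline */
static enum retpoline_state_t rstate_after_call(enum retpoline_state_t s,
						bool same_sym, bool mid_symbol)
{
	if (s == X86_RETPOLINE_POSSIBLE && same_sym && mid_symbol)
		return X86_RETPOLINE_DETECTED;
	return s;
}

/* the detected state is consumed by the very next sample */
static enum retpoline_state_t rstate_after_sample(enum retpoline_state_t s)
{
	return s == X86_RETPOLINE_DETECTED ? X86_RETPOLINE_POSSIBLE : s;
}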
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 1f626f4a1c40..71e15d4ec533 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* thread-stack.h: Synthesize a thread's stack using call / return events
* Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#ifndef __PERF_THREAD_STACK_H
@@ -35,10 +26,13 @@ struct call_path;
*
* CALL_RETURN_NO_CALL: 'return' but no matching 'call'
* CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
+ * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different
+ * symbol
*/
enum {
CALL_RETURN_NO_CALL = 1 << 0,
CALL_RETURN_NO_RETURN = 1 << 1,
+ CALL_RETURN_NON_CALL = 1 << 2,
};
/**
@@ -52,6 +46,7 @@ enum {
* @call_ref: external reference to 'call' sample (e.g. db_id)
* @return_ref: external reference to 'return' sample (e.g. db_id)
* @db_id: id used for db-export
+ * @parent_db_id: id of parent call used for db-export
* @flags: Call/Return flags
*/
struct call_return {
@@ -64,6 +59,7 @@ struct call_return {
u64 call_ref;
u64 return_ref;
u64 db_id;
+ u64 parent_db_id;
u32 flags;
};
@@ -76,7 +72,7 @@ struct call_return {
*/
struct call_return_processor {
struct call_path_root *cpr;
- int (*process)(struct call_return *cr, void *data);
+ int (*process)(struct call_return *cr, u64 *parent_db_id, void *data);
void *data;
};
@@ -90,7 +86,7 @@ void thread_stack__free(struct thread *thread);
size_t thread_stack__depth(struct thread *thread, int cpu);
struct call_return_processor *
-call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
void *data);
void call_return_processor__free(struct call_return_processor *crp);
int thread_stack__process(struct thread *thread, struct comm *comm,
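
With process() gaining a u64 *parent_db_id argument, an exporter can assign the parent's database id before the parent's own 'return' has been seen, which is what the new tse->db_id / cr.db_id plumbing in thread-stack.c relies on. A hedged sketch of a conforming callback; the id counter and the decision to pre-assign the parent's id are illustrative, not the actual db-export implementation:

static u64 last_db_id;	/* illustrative id source */

static int example_process(struct call_return *cr, u64 *parent_db_id, void *data)
{
	(void)data;

	if (!cr->db_id)
		cr->db_id = ++last_db_id;

	if (parent_db_id) {
		/* give the still-open parent an id so this child can link to it */
		if (!*parent_db_id)
			*parent_db_id = ++last_db_id;
		cr->parent_db_id = *parent_db_id;
	}

	/* ... emit cr to the export backend here ... */
	return 0;
}

Such a callback would be registered with call_return_processor__new(example_process, data).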
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c83372329f89..b413ba5b9835 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -12,7 +12,10 @@
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
+#include "map.h"
+#include "symbol.h"
#include "unwind.h"
+#include "callchain.h"
#include <api/fs/fs.h>
@@ -130,7 +133,7 @@ void thread__put(struct thread *thread)
}
}
-struct namespaces *thread__namespaces(const struct thread *thread)
+static struct namespaces *__thread__namespaces(const struct thread *thread)
{
if (list_empty(&thread->namespaces_list))
return NULL;
@@ -138,10 +141,21 @@ struct namespaces *thread__namespaces(const struct thread *thread)
return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}
+struct namespaces *thread__namespaces(const struct thread *thread)
+{
+ struct namespaces *ns;
+
+ down_read((struct rw_semaphore *)&thread->namespaces_lock);
+ ns = __thread__namespaces(thread);
+ up_read((struct rw_semaphore *)&thread->namespaces_lock);
+
+ return ns;
+}
+
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
struct namespaces_event *event)
{
- struct namespaces *new, *curr = thread__namespaces(thread);
+ struct namespaces *new, *curr = __thread__namespaces(thread);
new = namespaces__new(event);
if (!new)
@@ -325,7 +339,7 @@ static int thread__prepare_access(struct thread *thread)
{
int err = 0;
- if (symbol_conf.use_callchain)
+ if (dwarf_callchain_users)
err = __thread__prepare_access(thread);
return err;
@@ -392,3 +406,25 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
return machine__find_thread(machine, thread->pid_, thread->pid_);
}
+
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct addr_location al;
+ long offset;
+
+ if (machine__kernel_ip(machine, ip))
+ cpumode = PERF_RECORD_MISC_KERNEL;
+
+ if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
+ al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
+ map__load(al.map) < 0)
+ return -1;
+
+ offset = al.map->map_ip(al.map, ip);
+ if (is64bit)
+ *is64bit = al.map->dso->is_64_bit;
+
+ return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
+}
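
thread__namespaces() now follows the usual split-accessor pattern: a lock-free __thread__namespaces() for callers that already hold namespaces_lock, plus a public wrapper that takes the reader side of the rwsem (the casts drop const because the tools rwsem helpers take a non-const pointer). A generic sketch of the same pattern; 'struct box' and 'struct entry' are made up purely for illustration:

struct entry {
	struct list_head node;
};

struct box {
	struct rw_semaphore lock;
	struct list_head entries;
};

static struct entry *__box__first(const struct box *b)
{
	if (list_empty(&b->entries))
		return NULL;

	return list_first_entry(&b->entries, struct entry, node);
}

struct entry *box__first(const struct box *b)
{
	struct entry *e;

	down_read((struct rw_semaphore *)&b->lock);	/* reader lock */
	e = __box__first(b);
	up_read((struct rw_semaphore *)&b->lock);

	return e;
}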
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 712dd48cc0ca..cf8375c017a0 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,14 +5,18 @@
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
+#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
-#include "symbol.h"
-#include "map.h"
+#include "srccode.h"
+#include "symbol_conf.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+struct addr_location;
+struct map;
+struct namespaces_event;
struct thread_stack;
struct unwind_libunwind_ops;
@@ -109,6 +113,9 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
+int thread__memcpy(struct thread *thread, struct machine *machine,
+ void *buf, u64 ip, int len, bool *is64bit);
+
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 6193b46050a5..20663a460df3 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -11,6 +11,8 @@
#include "perf.h"
#include "debug.h"
#include "time-utils.h"
+#include "session.h"
+#include "evlist.h"
int parse_nsec_time(const char *str, u64 *ptime)
{
@@ -374,7 +376,7 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
struct perf_time_interval *ptime;
int i;
- if ((timestamp == 0) || (num == 0))
+ if ((!ptime_buf) || (timestamp == 0) || (num == 0))
return false;
if (num == 1)
@@ -396,6 +398,53 @@ bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
return (i == num) ? true : false;
}
+int perf_time__parse_for_ranges(const char *time_str,
+ struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num)
+{
+ struct perf_time_interval *ptime_range;
+ int size, num, ret;
+
+ ptime_range = perf_time__range_alloc(time_str, &size);
+ if (!ptime_range)
+ return -ENOMEM;
+
+ if (perf_time__parse_str(ptime_range, time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ num = perf_time__percent_parse_str(
+ ptime_range, size,
+ time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ num = 1;
+ }
+
+ *range_size = size;
+ *range_num = num;
+ *ranges = ptime_range;
+ return 0;
+
+error:
+ free(ptime_range);
+ return ret;
+}
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
@@ -404,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
}
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
+{
+ u64 sec = timestamp / NSEC_PER_SEC,
+ nsec = timestamp % NSEC_PER_SEC;
+
+ return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
+}
+
int fetch_current_timestamp(char *buf, size_t sz)
{
struct timeval tv;
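
timestamp__scnprintf_nsec() mirrors the existing usec helper but keeps all nine fractional digits, and perf_time__parse_for_ranges() bundles the absolute and percentage time-string parsers so callers get back an allocated, ready-to-use range array in one call. A quick usage sketch (the value is illustrative):

char buf[32];

timestamp__scnprintf_nsec(1234567890ULL, buf, sizeof(buf));	/* "1.234567890" */
timestamp__scnprintf_usec(1234567890ULL, buf, sizeof(buf));	/* "1.234567"    */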
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 70b177d2b98c..72a42ea1d513 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -23,7 +23,14 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
int num, u64 timestamp);
+struct perf_session;
+
+int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
+ struct perf_time_interval **ranges,
+ int *range_size, int *range_num);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 56e4ca54020a..9096a6e3de59 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -28,6 +28,7 @@ typedef int (*event_attr_op)(struct perf_tool *tool,
typedef int (*event_op2)(struct perf_session *session, union perf_event *event);
typedef s64 (*event_op3)(struct perf_session *session, union perf_event *event);
+typedef int (*event_op4)(struct perf_session *session, union perf_event *event, u64 data);
typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
struct ordered_events *oe);
@@ -53,7 +54,10 @@ struct perf_tool {
itrace_start,
context_switch,
throttle,
- unthrottle;
+ unthrottle,
+ ksymbol,
+ bpf_event;
+
event_attr_op attr;
event_attr_op event_update;
event_op2 tracing_data;
@@ -69,6 +73,7 @@ struct perf_tool {
stat,
stat_round,
feature;
+ event_op4 compressed;
event_op3 auxtrace;
bool ordered_events;
bool ordering_requires_timestamps;
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 4c8da8c4435f..251bbf124fb0 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Refactored from builtin-top.c, see that files for further copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
*/
#include "cpumap.h"
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 8ad8e755127b..806a11b334d3 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -1,22 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include "util.h"
#include <dirent.h>
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index ad74be1f0e42..62bc61155dd1 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1,22 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <stdio.h>
#include <stdlib.h>
@@ -111,7 +95,7 @@ raw_field_value(struct tep_event *event, const char *name, void *data)
unsigned long long read_size(struct tep_event *event, void *ptr, int size)
{
- return tep_read_number(event->pevent, ptr, size);
+ return tep_read_number(event->tep, ptr, size);
}
void event_format__fprintf(struct tep_event *event,
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index efe2f58cff4e..13c1cf60d1bc 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -1,22 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <dirent.h>
#include <stdio.h>
@@ -442,7 +426,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, file_bigendian);
- tep_set_host_bigendian(pevent, host_bigendian);
+ tep_set_local_bigendian(pevent, host_bigendian);
if (do_read(buf, 1) < 0)
goto out;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index b749f812ac70..b023db136ef3 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* trace-event-scripting. Scripting engine common and initialization code.
*
* Copyright (C) 2009-2010 Tom Zanussi <tzanussi@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <stdio.h>
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index cbe0dd758e3a..01b9d89bf5bf 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -40,7 +40,7 @@ int trace_event__init(struct trace_event *t)
static int trace_event__init2(void)
{
- int be = tep_host_bigendian();
+ int be = tep_is_bigendian();
struct tep_handle *pevent;
if (trace_event__init(&tevent))
@@ -49,7 +49,7 @@ static int trace_event__init2(void)
pevent = tevent.pevent;
tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, be);
- tep_set_host_bigendian(pevent, be);
+ tep_set_local_bigendian(pevent, be);
tevent_initialized = true;
return 0;
}
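
The rename tracks the libtraceevent API: tep_host_bigendian()/tep_set_host_bigendian() became tep_is_bigendian()/tep_set_local_bigendian(). A minimal sketch of setting up a handle with the new names, mirroring the call sequence above (tep_alloc() assumed available, error handling omitted):

struct tep_handle *pevent = tep_alloc();
int be = tep_is_bigendian();

tep_set_flag(pevent, TEP_NSEC_OUTPUT);
tep_set_file_bigendian(pevent, be);
tep_set_local_bigendian(pevent, be);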
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 5eff9bfc5758..407d0167b942 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -8,6 +8,8 @@
#include "unwind.h"
#include "unwind-libdw.h"
#include "machine.h"
+#include "map.h"
+#include "symbol.h"
#include "thread.h"
#include <linux/types.h>
#include "event.h"
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 79f521a552cf..25e1406b1f8b 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -34,6 +34,7 @@
#include "session.h"
#include "perf_regs.h"
#include "unwind.h"
+#include "map.h"
#include "symbol.h"
#include "util.h"
#include "debug.h"
@@ -616,8 +617,6 @@ static unw_accessors_t accessors = {
static int _unwind__prepare_access(struct thread *thread)
{
- if (!dwarf_callchain_users)
- return 0;
thread->addr_space = unw_create_addr_space(&accessors, 0);
if (!thread->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
@@ -630,15 +629,11 @@ static int _unwind__prepare_access(struct thread *thread)
static void _unwind__flush_access(struct thread *thread)
{
- if (!dwarf_callchain_users)
- return;
unw_flush_cache(thread->addr_space, 0, 0);
}
static void _unwind__finish_access(struct thread *thread)
{
- if (!dwarf_callchain_users)
- return;
unw_destroy_addr_space(thread->addr_space);
}
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index b029a5e9ae49..c0811977d7d5 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
+#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
+#include "callchain.h"
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
@@ -23,6 +25,9 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
int err;
+ if (!dwarf_callchain_users)
+ return 0;
+
if (thread->addr_space) {
pr_debug("unwind: thread map already set, dso=%s\n",
map->dso->name);
@@ -64,12 +69,18 @@ out_register:
void unwind__flush_access(struct thread *thread)
{
+ if (!dwarf_callchain_users)
+ return;
+
if (thread->unwind_libunwind_ops)
thread->unwind_libunwind_ops->flush_access(thread);
}
void unwind__finish_access(struct thread *thread)
{
+ if (!dwarf_callchain_users)
+ return;
+
if (thread->unwind_libunwind_ops)
thread->unwind_libunwind_ops->finish_access(thread);
}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 093352e93d50..d388f80d8703 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -2,6 +2,7 @@
#include "../perf.h"
#include "util.h"
#include "debug.h"
+#include "namespaces.h"
#include <api/fs/fs.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -20,6 +21,7 @@
#include <linux/time64.h>
#include <unistd.h>
#include "strlist.h"
+#include "string2.h"
/*
* XXX We need to find a better place for these things...
@@ -116,23 +118,67 @@ int mkdir_p(char *path, mode_t mode)
return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
}
-int rm_rf(const char *path)
+static bool match_pat(char *file, const char **pat)
+{
+ int i = 0;
+
+ if (!pat)
+ return true;
+
+ while (pat[i]) {
+ if (strglobmatch(file, pat[i]))
+ return true;
+
+ i++;
+ }
+
+ return false;
+}
+
+/*
+ * The depth specifies how deep the removal will go:
+ * 0 - remove only files directly under the 'path' directory
+ * 1 .. x - descend up to x levels deep under the 'path' directory
+ *
+ * If specified, pat is a NULL-terminated array of string patterns
+ * checked against every file/directory found. Only matching
+ * ones are removed.
+ *
+ * The function returns:
+ * 0 on success
+ * -1 on removal failure with errno set
+ * -2 on pattern failure
+ */
+static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
{
DIR *dir;
- int ret = 0;
+ int ret;
struct dirent *d;
char namebuf[PATH_MAX];
+ struct stat statbuf;
+ /* Do not fail if there's no file. */
+ ret = lstat(path, &statbuf);
+ if (ret)
+ return 0;
+
+ /* Try to remove any file we get. */
+ if (!(statbuf.st_mode & S_IFDIR))
+ return unlink(path);
+
+ /* We have directory in path. */
dir = opendir(path);
if (dir == NULL)
- return 0;
+ return -1;
while ((d = readdir(dir)) != NULL && !ret) {
- struct stat statbuf;
if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
continue;
+ if (!match_pat(d->d_name, pat))
+ return -2;
+
scnprintf(namebuf, sizeof(namebuf), "%s/%s",
path, d->d_name);
@@ -144,7 +190,7 @@ int rm_rf(const char *path)
}
if (S_ISDIR(statbuf.st_mode))
- ret = rm_rf(namebuf);
+ ret = depth ? rm_rf_depth_pat(namebuf, depth - 1, pat) : 0;
else
ret = unlink(namebuf);
}
@@ -156,6 +202,22 @@ int rm_rf(const char *path)
return rmdir(path);
}
+int rm_rf_perf_data(const char *path)
+{
+ const char *pat[] = {
+ "header",
+ "data.*",
+ NULL,
+ };
+
+ return rm_rf_depth_pat(path, 0, pat);
+}
+
+int rm_rf(const char *path)
+{
+ return rm_rf_depth_pat(path, INT_MAX, NULL);
+}
+
/* A filter which removes dot files */
bool lsdir_no_dot_filter(const char *name __maybe_unused, struct dirent *d)
{
@@ -506,3 +568,13 @@ out:
return tip;
}
+
+char *perf_exe(char *buf, int len)
+{
+ int n = readlink("/proc/self/exe", buf, len);
+ if (n > 0) {
+ buf[n] = 0;
+ return buf;
+ }
+ return strcpy(buf, "perf");
+}
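
rm_rf() is now a thin wrapper around rm_rf_depth_pat() with unlimited depth and no pattern, while rm_rf_perf_data() stays at the top level and only matches the 'header' and 'data.*' files of a directory-mode perf.data before removing the directory itself. A short hedged usage sketch (the path is illustrative):

char exe[PATH_MAX];

/* clean up a directory-mode perf.data, refusing unexpected contents */
if (rm_rf_perf_data("./perf.data") != 0)
	pr_err("failed to remove ./perf.data\n");

/* resolve the running binary via /proc/self/exe, falling back to "perf" */
printf("running as %s\n", perf_exe(exe, sizeof(exe)));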
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index ece040b799f6..09c1b0f91f65 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -31,6 +31,7 @@ struct strlist;
int mkdir_p(char *path, mode_t mode);
int rm_rf(const char *path);
+int rm_rf_perf_data(const char *path);
struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *));
bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
@@ -76,6 +77,8 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+char *perf_exe(char *buf, int len);
+
#ifndef O_CLOEXEC
#ifdef __sparc__
#define O_CLOEXEC 0x400000
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 3702cba11d7d..5031b7b22bbd 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -11,6 +11,7 @@
#include "vdso.h"
#include "util.h"
+#include "map.h"
#include "symbol.h"
#include "machine.h"
#include "thread.h"
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 902ce6384f57..512ad7c09b13 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -6,7 +6,6 @@
#include <sys/mman.h>
#include <zlib.h>
#include <linux/compiler.h>
-#include <unistd.h>
#include "util/compress.h"
#include "util/util.h"
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
new file mode 100644
index 000000000000..23bdb9884576
--- /dev/null
+++ b/tools/perf/util/zstd.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <string.h>
+
+#include "util/compress.h"
+#include "util/debug.h"
+
+int zstd_init(struct zstd_data *data, int level)
+{
+ size_t ret;
+
+ data->dstream = ZSTD_createDStream();
+ if (data->dstream == NULL) {
+ pr_err("Couldn't create decompression stream.\n");
+ return -1;
+ }
+
+ ret = ZSTD_initDStream(data->dstream);
+ if (ZSTD_isError(ret)) {
+ pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret));
+ return -1;
+ }
+
+ if (!level)
+ return 0;
+
+ data->cstream = ZSTD_createCStream();
+ if (data->cstream == NULL) {
+ pr_err("Couldn't create compression stream.\n");
+ return -1;
+ }
+
+ ret = ZSTD_initCStream(data->cstream, level);
+ if (ZSTD_isError(ret)) {
+ pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret));
+ return -1;
+ }
+
+ return 0;
+}
+
+int zstd_fini(struct zstd_data *data)
+{
+ if (data->dstream) {
+ ZSTD_freeDStream(data->dstream);
+ data->dstream = NULL;
+ }
+
+ if (data->cstream) {
+ ZSTD_freeCStream(data->cstream);
+ data->cstream = NULL;
+ }
+
+ return 0;
+}
+
+size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size,
+ void *src, size_t src_size, size_t max_record_size,
+ size_t process_header(void *record, size_t increment))
+{
+ size_t ret, size, compressed = 0;
+ ZSTD_inBuffer input = { src, src_size, 0 };
+ ZSTD_outBuffer output;
+ void *record;
+
+ while (input.pos < input.size) {
+ record = dst;
+ size = process_header(record, 0);
+ compressed += size;
+ dst += size;
+ dst_size -= size;
+ output = (ZSTD_outBuffer){ dst, (dst_size > max_record_size) ?
+ max_record_size : dst_size, 0 };
+ ret = ZSTD_compressStream(data->cstream, &output, &input);
+ ZSTD_flushStream(data->cstream, &output);
+ if (ZSTD_isError(ret)) {
+ pr_err("failed to compress %ld bytes: %s\n",
+ (long)src_size, ZSTD_getErrorName(ret));
+ memcpy(dst, src, src_size);
+ return src_size;
+ }
+ size = output.pos;
+ size = process_header(record, size);
+ compressed += size;
+ dst += size;
+ dst_size -= size;
+ }
+
+ return compressed;
+}
+
+size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size,
+ void *dst, size_t dst_size)
+{
+ size_t ret;
+ ZSTD_inBuffer input = { src, src_size, 0 };
+ ZSTD_outBuffer output = { dst, dst_size, 0 };
+
+ while (input.pos < input.size) {
+ ret = ZSTD_decompressStream(data->dstream, &output, &input);
+ if (ZSTD_isError(ret)) {
+ pr_err("failed to decompress (B): %ld -> %ld : %s\n",
+ src_size, output.size, ZSTD_getErrorName(ret));
+ break;
+ }
+ output.dst = dst + output.pos;
+ output.size = dst_size - output.pos;
+ }
+
+ return output.pos;
+}
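
zstd.c adds a small streaming wrapper over libzstd: zstd_init() always prepares the decompression stream and only creates the compression side when a non-zero level is passed, and zstd_compress_stream_to_records() slices the output into records no larger than max_record_size, calling process_header() once to reserve header space and once more with the compressed payload size so the callback can patch the header and report how far to advance. A hedged usage sketch; the 8-byte length-prefix 'header' below is made up for illustration and is not perf's PERF_RECORD_COMPRESSED layout:

/* illustrative header: an 8-byte payload-size prefix per record */
static size_t prefix_header(void *record, size_t increment)
{
	u64 *len = record;

	if (increment) {		/* second call: payload was written  */
		*len = increment;	/* record the payload size           */
		return increment;	/* advance past the payload          */
	}

	return sizeof(*len);		/* first call: reserve header space  */
}

static int compress_buf(void *src, size_t src_size, void *dst, size_t dst_size)
{
	struct zstd_data zd = { NULL, NULL };
	size_t used;

	if (zstd_init(&zd, 3))			/* non-zero level enables the cstream */
		return -1;

	used = zstd_compress_stream_to_records(&zd, dst, dst_size, src, src_size,
					       4096 /* max record payload */,
					       prefix_header);
	zstd_fini(&zd);

	return (int)used;
}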
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index a8bf9081512b..ebd3e1a1c28e 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/Makefile - ACPI tool Makefile
#
# Copyright (c) 2013, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
include ../../scripts/Makefile.include
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index f304be71c278..0111d246d1ca 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/Makefile.config - ACPI tool Makefile
#
# Copyright (c) 2015, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(shell pwd)))
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index 373738338f51..2a6c170b57cd 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/Makefile.rules - ACPI tool Makefile
#
# Copyright (c) 2015, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
objdir := $(OUTPUT)tools/$(TOOL)/
toolobjs := $(addprefix $(objdir),$(TOOL_OBJS))
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index ff8025de8237..28c11c6b4d06 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -3,7 +3,7 @@
*
* Module Name: cfsize - Common get file size function
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 7be89b873661..6b41d8b64a00 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -3,7 +3,7 @@
*
* Module Name: getopt
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index a20c703f8b7d..d1f3d44e315e 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -3,7 +3,7 @@
*
* Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -19,7 +19,7 @@ ACPI_MODULE_NAME("oslinuxtbl")
typedef struct osl_table_info {
struct osl_table_info *next;
u32 instance;
- char signature[ACPI_NAME_SIZE];
+ char signature[ACPI_NAMESEG_SIZE];
} osl_table_info;
@@ -286,14 +286,14 @@ static acpi_status osl_add_table_to_list(char *signature, u32 instance)
return (AE_NO_MEMORY);
}
- ACPI_MOVE_NAME(new_info->signature, signature);
+ ACPI_COPY_NAMESEG(new_info->signature, signature);
if (!gbl_table_list_head) {
gbl_table_list_head = new_info;
} else {
next = gbl_table_list_head;
while (1) {
- if (ACPI_COMPARE_NAME(next->signature, signature)) {
+ if (ACPI_COMPARE_NAMESEG(next->signature, signature)) {
if (next->instance == instance) {
found = TRUE;
}
@@ -782,11 +782,11 @@ osl_get_bios_table(char *signature,
/* Handle special tables whose addresses are not in RSDT/XSDT */
- if (ACPI_COMPARE_NAME(signature, ACPI_RSDP_NAME) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT) ||
- ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(signature, ACPI_RSDP_NAME) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {
find_next_instance:
@@ -797,7 +797,7 @@ find_next_instance:
* careful about the FADT length and validate table addresses.
* Note: The 64-bit addresses have priority.
*/
- if (ACPI_COMPARE_NAME(signature, ACPI_SIG_DSDT)) {
+ if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_DSDT)) {
if (current_instance < 2) {
if ((gbl_fadt->header.length >=
MIN_FADT_FOR_XDSDT) && gbl_fadt->Xdsdt
@@ -815,7 +815,7 @@ find_next_instance:
dsdt;
}
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_FACS)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_FACS)) {
if (current_instance < 2) {
if ((gbl_fadt->header.length >=
MIN_FADT_FOR_XFACS) && gbl_fadt->Xfacs
@@ -833,7 +833,7 @@ find_next_instance:
facs;
}
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_XSDT)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_XSDT)) {
if (!gbl_revision) {
return (AE_BAD_SIGNATURE);
}
@@ -842,7 +842,7 @@ find_next_instance:
(acpi_physical_address)gbl_rsdp.
xsdt_physical_address;
}
- } else if (ACPI_COMPARE_NAME(signature, ACPI_SIG_RSDT)) {
+ } else if (ACPI_COMPARE_NAMESEG(signature, ACPI_SIG_RSDT)) {
if (current_instance == 0) {
table_address =
(acpi_physical_address)gbl_rsdp.
@@ -931,7 +931,7 @@ find_next_instance:
/* Does this table match the requested signature? */
- if (!ACPI_COMPARE_NAME
+ if (!ACPI_COMPARE_NAMESEG
(mapped_table->signature, signature)) {
osl_unmap_table(mapped_table);
mapped_table = NULL;
@@ -995,7 +995,7 @@ static acpi_status osl_list_customized_tables(char *directory)
{
void *table_dir;
u32 instance;
- char temp_name[ACPI_NAME_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE];
char *filename;
acpi_status status = AE_OK;
@@ -1086,8 +1086,8 @@ osl_map_table(acpi_size address,
return (AE_BAD_SIGNATURE);
}
} else
- if (!ACPI_COMPARE_NAME(signature, mapped_table->signature))
- {
+ if (!ACPI_COMPARE_NAMESEG
+ (signature, mapped_table->signature)) {
acpi_os_unmap_memory(mapped_table,
sizeof(struct acpi_table_header));
return (AE_BAD_SIGNATURE);
@@ -1158,15 +1158,15 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
/* Ignore meaningless files */
- if (strlen(filename) < ACPI_NAME_SIZE) {
+ if (strlen(filename) < ACPI_NAMESEG_SIZE) {
return (AE_BAD_SIGNATURE);
}
/* Extract instance number */
- if (isdigit((int)filename[ACPI_NAME_SIZE])) {
- sscanf(&filename[ACPI_NAME_SIZE], "%u", instance);
- } else if (strlen(filename) != ACPI_NAME_SIZE) {
+ if (isdigit((int)filename[ACPI_NAMESEG_SIZE])) {
+ sscanf(&filename[ACPI_NAMESEG_SIZE], "%u", instance);
+ } else if (strlen(filename) != ACPI_NAMESEG_SIZE) {
return (AE_BAD_SIGNATURE);
} else {
*instance = 0;
@@ -1174,7 +1174,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
/* Extract signature */
- ACPI_MOVE_NAME(signature, filename);
+ ACPI_COPY_NAMESEG(signature, filename);
return (AE_OK);
}
@@ -1236,7 +1236,7 @@ osl_read_table_from_file(char *filename,
status = AE_BAD_SIGNATURE;
goto exit;
}
- } else if (!ACPI_COMPARE_NAME(signature, header.signature)) {
+ } else if (!ACPI_COMPARE_NAMESEG(signature, header.signature)) {
fprintf(stderr,
"Incorrect signature: Expecting %4.4s, found %4.4s\n",
signature, header.signature);
@@ -1311,7 +1311,7 @@ osl_get_customized_table(char *pathname,
{
void *table_dir;
u32 current_instance = 0;
- char temp_name[ACPI_NAME_SIZE];
+ char temp_name[ACPI_NAMESEG_SIZE];
char table_filename[PATH_MAX];
char *filename;
acpi_status status;
@@ -1329,7 +1329,7 @@ osl_get_customized_table(char *pathname,
/* Ignore meaningless files */
- if (!ACPI_COMPARE_NAME(filename, signature)) {
+ if (!ACPI_COMPARE_NAMESEG(filename, signature)) {
continue;
}
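
The ACPI_NAME_SIZE, ACPI_MOVE_NAME and ACPI_COMPARE_NAME changes above are a mechanical rename to ACPI_NAMESEG_SIZE, ACPI_COPY_NAMESEG and ACPI_COMPARE_NAMESEG, following the same rename in upstream ACPICA; behaviour is unchanged: an ACPI table signature is a fixed 4-byte name segment that is copied and compared as a whole. A minimal sketch of the intended semantics (the helper names below are illustrative, not the real ACPICA macros):

/* Illustrative equivalents of the ACPICA name-segment helpers used above. */
#include <string.h>

#define ACPI_NAMESEG_SIZE 4			/* "DSDT", "FACS", ... */

static inline int compare_nameseg(const void *a, const void *b)
{
	return memcmp(a, b, ACPI_NAMESEG_SIZE) == 0;
}

static inline void copy_nameseg(char *dst, const char *src)
{
	memcpy(dst, src, ACPI_NAMESEG_SIZE);	/* copies 4 bytes, no terminating NUL */
}
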
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index 4fd44e218a83..30913f124dd5 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixdir - Unix directory access interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index 93d8359309b6..29dfb47adfeb 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixmap - Unix OSL for file mappings
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 8a88e87778bd..83d3b3b829b8 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixxf - UNIX OSL interfaces
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile
index f2d06e773eb4..2ce0ee5d0deb 100644
--- a/tools/power/acpi/tools/acpidbg/Makefile
+++ b/tools/power/acpi/tools/acpidbg/Makefile
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/tools/acpidbg/Makefile - ACPI tool Makefile
#
# Copyright (c) 2015, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
include ../../Makefile.config
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
index 4308362d7068..3d2bfd716028 100644
--- a/tools/power/acpi/tools/acpidbg/acpidbg.c
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI AML interfacing userspace utility
*
* Copyright (C) 2015, Intel Corporation
* Authors: Lv Zheng <lv.zheng@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <acpi/acpi.h>
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index b436f8675f6a..1208a105a871 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
#
# Copyright (c) 2015, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
include ../../Makefile.config
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index f69f1f559743..2eb0aaa4f462 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -3,7 +3,7 @@
*
* Module Name: acpidump.h - Include file for acpi_dump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index a9848de32d8e..820baeb5092b 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -3,7 +3,7 @@
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -289,14 +289,14 @@ int ap_dump_table_by_address(char *ascii_address)
int ap_dump_table_by_name(char *signature)
{
- char local_signature[ACPI_NAME_SIZE + 1];
+ char local_signature[ACPI_NAMESEG_SIZE + 1];
u32 instance;
struct acpi_table_header *table;
acpi_physical_address address;
acpi_status status;
int table_status;
- if (strlen(signature) != ACPI_NAME_SIZE) {
+ if (strlen(signature) != ACPI_NAMESEG_SIZE) {
fprintf(stderr,
"Invalid table signature [%s]: must be exactly 4 characters\n",
signature);
@@ -310,9 +310,9 @@ int ap_dump_table_by_name(char *signature)
/* To be friendly, handle tables whose signatures do not match the name */
- if (ACPI_COMPARE_NAME(local_signature, "FADT")) {
+ if (ACPI_COMPARE_NAMESEG(local_signature, "FADT")) {
strcpy(local_signature, ACPI_SIG_FADT);
- } else if (ACPI_COMPARE_NAME(local_signature, "MADT")) {
+ } else if (ACPI_COMPARE_NAMESEG(local_signature, "MADT")) {
strcpy(local_signature, ACPI_SIG_MADT);
}
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index e86207e5afcf..a42cfcaa3293 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -3,7 +3,7 @@
*
* Module Name: apfiles - File-related functions for acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
@@ -97,7 +97,7 @@ int ap_open_output_file(char *pathname)
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
{
- char filename[ACPI_NAME_SIZE + 16];
+ char filename[ACPI_NAMESEG_SIZE + 16];
char instance_str[16];
ACPI_FILE file;
acpi_size actual;
@@ -110,16 +110,16 @@ int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
/* Construct lower-case filename from the table local signature */
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
- ACPI_MOVE_NAME(filename, ACPI_RSDP_NAME);
+ ACPI_COPY_NAMESEG(filename, ACPI_RSDP_NAME);
} else {
- ACPI_MOVE_NAME(filename, table->signature);
+ ACPI_COPY_NAMESEG(filename, table->signature);
}
filename[0] = (char)tolower((int)filename[0]);
filename[1] = (char)tolower((int)filename[1]);
filename[2] = (char)tolower((int)filename[2]);
filename[3] = (char)tolower((int)filename[3]);
- filename[ACPI_NAME_SIZE] = 0;
+ filename[ACPI_NAMESEG_SIZE] = 0;
/* Handle multiple SSDts - create different filenames for each */
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 2d9b94b631cb..d8f1b57537d3 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -3,7 +3,7 @@
*
* Module Name: apmain - Main module for the acpidump utility
*
- * Copyright (C) 2000 - 2018, Intel Corp.
+ * Copyright (C) 2000 - 2019, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/ec/Makefile b/tools/power/acpi/tools/ec/Makefile
index 75d8a127b6ee..d0abac0ec23a 100644
--- a/tools/power/acpi/tools/ec/Makefile
+++ b/tools/power/acpi/tools/ec/Makefile
@@ -1,12 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
#
# Copyright (c) 2015, Intel Corporation
# Author: Lv Zheng <lv.zheng@intel.com>
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; version 2
-# of the License.
include ../../Makefile.config
diff --git a/tools/power/acpi/tools/ec/ec_access.c b/tools/power/acpi/tools/ec/ec_access.c
index 5f50642386db..8bb271b210d8 100644
--- a/tools/power/acpi/tools/ec/ec_access.c
+++ b/tools/power/acpi/tools/ec/ec_access.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ec_access.c
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Author:
* Thomas Renninger <trenn@suse.de>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#include <fcntl.h>
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index fd8765af19bb..9063fca480b3 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for cpupower
#
# Copyright (C) 2005,2006 Dominik Brodowski <linux@dominikbrodowski.net>
@@ -6,19 +7,6 @@
#
# Copyright (C) 2003,2004 Greg Kroah-Hartman <greg@kroah.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
OUTPUT=./
ifeq ("$(origin O)", "command line")
OUTPUT := $(O)/
diff --git a/tools/power/cpupower/bench/benchmark.c b/tools/power/cpupower/bench/benchmark.c
index 429d51ab8031..c7234cf3f6ff 100644
--- a/tools/power/cpupower/bench/benchmark.c
+++ b/tools/power/cpupower/bench/benchmark.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/bench/benchmark.h b/tools/power/cpupower/bench/benchmark.h
index 51d7f50ac2bb..bf612de897eb 100644
--- a/tools/power/cpupower/bench/benchmark.h
+++ b/tools/power/cpupower/bench/benchmark.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* load loop, this schould take about 1 to 2ms to complete */
diff --git a/tools/power/cpupower/bench/config.h b/tools/power/cpupower/bench/config.h
index ee6f258e5336..fec5b0b055f2 100644
--- a/tools/power/cpupower/bench/config.h
+++ b/tools/power/cpupower/bench/config.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* initial loop count for the load calibration */
diff --git a/tools/power/cpupower/bench/cpufreq-bench_plot.sh b/tools/power/cpupower/bench/cpufreq-bench_plot.sh
index 410021a12f40..9061b4f1244e 100644
--- a/tools/power/cpupower/bench/cpufreq-bench_plot.sh
+++ b/tools/power/cpupower/bench/cpufreq-bench_plot.sh
@@ -1,19 +1,6 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
# Author/Copyright(c): 2009, Thomas Renninger <trenn@suse.de>, Novell Inc.
diff --git a/tools/power/cpupower/bench/cpufreq-bench_script.sh b/tools/power/cpupower/bench/cpufreq-bench_script.sh
index de20d2a06879..4e9714b876d2 100644
--- a/tools/power/cpupower/bench/cpufreq-bench_script.sh
+++ b/tools/power/cpupower/bench/cpufreq-bench_script.sh
@@ -1,19 +1,6 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
# Author/Copyright(c): 2009, Thomas Renninger <trenn@suse.de>, Novell Inc.
diff --git a/tools/power/cpupower/bench/main.c b/tools/power/cpupower/bench/main.c
index 24910313a521..429d1b6b8bc8 100644
--- a/tools/power/cpupower/bench/main.c
+++ b/tools/power/cpupower/bench/main.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
index 84caee38418f..e63dc11fa3a5 100644
--- a/tools/power/cpupower/bench/parse.c
+++ b/tools/power/cpupower/bench/parse.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/bench/parse.h b/tools/power/cpupower/bench/parse.h
index a8dc632d9eee..d5b3e34d7064 100644
--- a/tools/power/cpupower/bench/parse.h
+++ b/tools/power/cpupower/bench/parse.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* struct that holds the required config parameters */
diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
index 2bb3eef7d5c1..40f3679e70b5 100644
--- a/tools/power/cpupower/bench/system.c
+++ b/tools/power/cpupower/bench/system.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/bench/system.h b/tools/power/cpupower/bench/system.h
index 3a8c858b78f0..530fa28230d1 100644
--- a/tools/power/cpupower/bench/system.h
+++ b/tools/power/cpupower/bench/system.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "parse.h"
diff --git a/tools/power/cpupower/debug/i386/centrino-decode.c b/tools/power/cpupower/debug/i386/centrino-decode.c
index 7ef24cce4926..700cd31a7d02 100644
--- a/tools/power/cpupower/debug/i386/centrino-decode.c
+++ b/tools/power/cpupower/debug/i386/centrino-decode.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2003 - 2004 Dominik Brodowski <linux@dominikbrodowski.de>
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on code found in
* linux/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
* and originally developed by Jeremy Fitzhardinge.
diff --git a/tools/power/cpupower/debug/i386/intel_gsic.c b/tools/power/cpupower/debug/i386/intel_gsic.c
index d032c826d42e..e5e926f46d6b 100644
--- a/tools/power/cpupower/debug/i386/intel_gsic.c
+++ b/tools/power/cpupower/debug/i386/intel_gsic.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2003 Bruno Ducrot
* (C) 2004 Dominik Brodowski <linux@dominikbrodowski.de>
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on code found in
* linux/include/asm-i386/ist.h and linux/arch/i386/kernel/setup.c
* and originally developed by Andy Grover <andrew.grover@intel.com>
diff --git a/tools/power/cpupower/debug/i386/powernow-k8-decode.c b/tools/power/cpupower/debug/i386/powernow-k8-decode.c
index 638a6b3bfd97..735dca1e25bc 100644
--- a/tools/power/cpupower/debug/i386/powernow-k8-decode.c
+++ b/tools/power/cpupower/debug/i386/powernow-k8-decode.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on code found in
* linux/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
* and originally developed by Paul Devriendt
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index 6ff8383f2941..e364b170bf85 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* test module to check whether the TSC-based delay routine continues
* to work properly after cpufreq transitions. Needs ACPI to work
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c
index 0c0f3e3f0d80..2f55d4d23446 100644
--- a/tools/power/cpupower/lib/cpufreq.c
+++ b/tools/power/cpupower/lib/cpufreq.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
@@ -333,17 +332,20 @@ void cpufreq_put_available_governors(struct cpufreq_available_governors *any)
}
-struct cpufreq_available_frequencies
-*cpufreq_get_available_frequencies(unsigned int cpu)
+struct cpufreq_frequencies
+*cpufreq_get_frequencies(const char *type, unsigned int cpu)
{
- struct cpufreq_available_frequencies *first = NULL;
- struct cpufreq_available_frequencies *current = NULL;
+ struct cpufreq_frequencies *first = NULL;
+ struct cpufreq_frequencies *current = NULL;
char one_value[SYSFS_PATH_MAX];
char linebuf[MAX_LINE_LEN];
+ char fname[MAX_LINE_LEN];
unsigned int pos, i;
unsigned int len;
- len = sysfs_cpufreq_read_file(cpu, "scaling_available_frequencies",
+ snprintf(fname, MAX_LINE_LEN, "scaling_%s_frequencies", type);
+
+ len = sysfs_cpufreq_read_file(cpu, fname,
linebuf, sizeof(linebuf));
if (len == 0)
return NULL;
@@ -389,9 +391,9 @@ struct cpufreq_available_frequencies
return NULL;
}
-void cpufreq_put_available_frequencies(struct cpufreq_available_frequencies
- *any) {
- struct cpufreq_available_frequencies *tmp, *next;
+void cpufreq_put_frequencies(struct cpufreq_frequencies *any)
+{
+ struct cpufreq_frequencies *tmp, *next;
if (!any)
return;
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 60beaf5ed2ea..a55f0d19215b 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* cpufreq.h - definitions for libcpufreq
*
* Copyright (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __CPUPOWER_CPUFREQ_H__
@@ -28,10 +20,10 @@ struct cpufreq_available_governors {
struct cpufreq_available_governors *first;
};
-struct cpufreq_available_frequencies {
+struct cpufreq_frequencies {
unsigned long frequency;
- struct cpufreq_available_frequencies *next;
- struct cpufreq_available_frequencies *first;
+ struct cpufreq_frequencies *next;
+ struct cpufreq_frequencies *first;
};
@@ -129,14 +121,14 @@ void cpufreq_put_available_governors(
*
* Only present on _some_ ->target() cpufreq drivers. For information purposes
* only. Please free allocated memory by calling
- * cpufreq_put_available_frequencies after use.
+ * cpufreq_put_frequencies after use.
*/
-struct cpufreq_available_frequencies
-*cpufreq_get_available_frequencies(unsigned int cpu);
+struct cpufreq_frequencies
+*cpufreq_get_frequencies(const char *type, unsigned int cpu);
-void cpufreq_put_available_frequencies(
- struct cpufreq_available_frequencies *first);
+void cpufreq_put_frequencies(
+ struct cpufreq_frequencies *first);
/* determine affected CPUs
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c
index 852d25462388..479c5971aa6d 100644
--- a/tools/power/cpupower/lib/cpuidle.c
+++ b/tools/power/cpupower/lib/cpuidle.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
* (C) 2011 Thomas Renninger <trenn@novell.com> Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 9711d628b0f4..3656e697537e 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
#include <sys/types.h>
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index c3f39d5128ee..e63cf55f81cf 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
@@ -161,19 +160,12 @@ static void print_duration(unsigned long duration)
return;
}
-/* --boost / -b */
-
-static int get_boost_mode(unsigned int cpu)
+static int get_boost_mode_x86(unsigned int cpu)
{
int support, active, b_states = 0, ret, pstate_no, i;
/* ToDo: Make this more global */
unsigned long pstates[MAX_HW_PSTATES] = {0,};
- if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
- cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
- cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
- return 0;
-
ret = cpufreq_has_boost_support(cpu, &support, &active, &b_states);
if (ret) {
printf(_("Error while evaluating Boost Capabilities"
@@ -248,6 +240,33 @@ static int get_boost_mode(unsigned int cpu)
return 0;
}
+/* --boost / -b */
+
+static int get_boost_mode(unsigned int cpu)
+{
+ struct cpufreq_frequencies *freqs;
+
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+ cpupower_cpu_info.vendor == X86_VENDOR_HYGON ||
+ cpupower_cpu_info.vendor == X86_VENDOR_INTEL)
+ return get_boost_mode_x86(cpu);
+
+ freqs = cpufreq_get_frequencies("boost", cpu);
+ if (freqs) {
+ printf(_(" boost frequency steps: "));
+ while (freqs->next) {
+ print_speed(freqs->frequency);
+ printf(", ");
+ freqs = freqs->next;
+ }
+ print_speed(freqs->frequency);
+ printf("\n");
+ cpufreq_put_frequencies(freqs);
+ }
+
+ return 0;
+}
+
/* --freq / -f */
static int get_freq_kernel(unsigned int cpu, unsigned int human)
@@ -456,7 +475,7 @@ static int get_latency(unsigned int cpu, unsigned int human)
static void debug_output_one(unsigned int cpu)
{
- struct cpufreq_available_frequencies *freqs;
+ struct cpufreq_frequencies *freqs;
get_driver(cpu);
get_related_cpus(cpu);
@@ -464,7 +483,7 @@ static void debug_output_one(unsigned int cpu)
get_latency(cpu, 1);
get_hardware_limits(cpu, 1);
- freqs = cpufreq_get_available_frequencies(cpu);
+ freqs = cpufreq_get_frequencies("available", cpu);
if (freqs) {
printf(_(" available frequency steps: "));
while (freqs->next) {
@@ -474,7 +493,7 @@ static void debug_output_one(unsigned int cpu)
}
print_speed(freqs->frequency);
printf("\n");
- cpufreq_put_available_frequencies(freqs);
+ cpufreq_put_frequencies(freqs);
}
get_available_governors(cpu);
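
The libcpufreq rework above folds scaling_available_frequencies and scaling_boost_frequencies into one accessor: cpufreq_get_frequencies(type, cpu) reads scaling_<type>_frequencies from sysfs and returns a cpufreq_frequencies list that must be released with cpufreq_put_frequencies(). A hedged usage sketch, assuming the cpufreq.h from this patch is on the include path:

/* List the frequency steps a driver exports; type is "available" or "boost". */
#include <stdio.h>
#include "cpufreq.h"

static void list_freqs(const char *type, unsigned int cpu)
{
	struct cpufreq_frequencies *freqs, *f;

	freqs = cpufreq_get_frequencies(type, cpu);
	if (!freqs)
		return;		/* driver does not export this sysfs file */

	for (f = freqs; f; f = f->next)
		printf("%s: %lu kHz\n", type, f->frequency);

	cpufreq_put_frequencies(freqs);
}
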
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 1eef0aed6423..f49bc4aa2a08 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index b59c85defa05..f2b202c5552a 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
* (C) 2010 Thomas Renninger <trenn@suse.de>
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index c7caa8eaa6d0..4c9d342b70ff 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 532f46b9a335..3cd95c6cb974 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 2dccf4998599..8e3d08042825 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Ideas taken over from the perf userspace tool (included in the Linus
* kernel git repo): subcommand builtins and param parsing.
*/
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 902139689315..357b19bb136e 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -1,8 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Miscellaneous helpers which do not fit or are worth
* to put into separate headers
*/
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 4e8fe2c7b054..e13ff38329a0 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
* (C) 2011 Thomas Renninger <trenn@novell.com> Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index a1a6c6041a1e..3dd0925d7594 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* ToDo: Needs to be done more properly for AMD/Intel specifics
*/
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 2116df9ad832..3f893b99b337 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* PCI initialization based on example code from:
* Andreas Herrmann <andreas.herrmann3@amd.com>
*/
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index 5b8c4956ff9a..f634aeb65c5f 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc
- *
- * Licensed under the terms of the GNU GPL License version 2.
- *
*/
#include <stdio.h>
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index 051da0a7c454..d3c3e6e7aa26 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Output format inspired by Len Brown's <lenb@kernel.org> turbostat tool.
- *
*/
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
index 2ae50b499e0a..a2d901d3bfaf 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -1,8 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
- *
*/
#ifndef __CPUIDLE_INFO_HW__
diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
index f794d6bbb7e9..7c7451d3f494 100644
--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on SandyBridge monitor. Implements the new package C-states
* (PC8, PC9, PC10) coming with a specific Haswell (family 0x45) CPU.
*/
diff --git a/tools/power/cpupower/utils/idle_monitor/idle_monitors.h b/tools/power/cpupower/utils/idle_monitor/idle_monitors.h
index 4fcdeb1e07e8..e9e567ec879e 100644
--- a/tools/power/cpupower/utils/idle_monitor/idle_monitors.h
+++ b/tools/power/cpupower/utils/idle_monitor/idle_monitors.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on the idea from Michael Matz <matz@suse.de>
- *
*/
#ifndef _CPUIDLE_IDLE_MONITORS_H_
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index f2a7e9cfd577..44806a6dae11 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -1,7 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
- *
- * Licensed under the terms of the GNU GPL License version 2.
*/
#if defined(__i386__) || defined(__x86_64__)
diff --git a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
index abf8cb5f7349..be7256696a37 100644
--- a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on Len Brown's <lenb@kernel.org> turbostat tool.
*/
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
index a2b45219648d..968333571cad 100644
--- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
- * Licensed under the terms of the GNU GPL License version 2.
- *
* Based on Len Brown's <lenb@kernel.org> turbostat tool.
*/
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index 6dae57041537..3d899dd8147a 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -1,17 +1,9 @@
#!/usr/bin/python2
+# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing boot timing
# Copyright (c) 2013, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index 52618f3444d4..d1a88d05e976 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -1,17 +1,9 @@
#!/usr/bin/python2
+# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
# Authors:
# Todd Brandt <todd.e.brandt@linux.intel.com>
#
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 2fa3c5757bcb..2d6d342b148f 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -1,4 +1,5 @@
#!/usr/bin/python
+# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 1598b4fa0b11..045f5f7d68ab 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,7 +9,7 @@ ifeq ("$(origin O)", "command line")
endif
turbostat : turbostat.c
-override CFLAGS += -Wall
+override CFLAGS += -Wall -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9327c0ddc3a5..75fc4fb9901c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* turbostat -- show CPU frequency and C-state residency
* on modern Intel and AMD processors.
*
* Copyright (c) 2013 Intel Corporation.
* Len Brown <len.brown@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE
@@ -44,6 +32,7 @@
#include <cpuid.h>
#include <linux/capability.h>
#include <errno.h>
+#include <math.h>
char *proc_stat = "/proc/stat";
FILE *outf;
@@ -63,7 +52,6 @@ unsigned int dump_only;
unsigned int do_snb_cstates;
unsigned int do_knl_cstates;
unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int has_epb;
@@ -141,9 +129,21 @@ unsigned int first_counter_read = 1;
#define RAPL_CORES_ENERGY_STATUS (1 << 9)
/* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY (1 << 10)
+ /* Indicates cores energy collection is per-core,
+ * not per-package. */
+#define RAPL_AMD_F17H (1 << 11)
+ /* 0xc0010299 MSR_RAPL_PWR_UNIT */
+ /* 0xc001029a MSR_CORE_ENERGY_STAT */
+ /* 0xc001029b MSR_PKG_ENERGY_STAT */
#define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
#define TJMAX_DEFAULT 100
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT 0xc0010299
+#define MSR_CORE_ENERGY_STAT 0xc001029a
+#define MSR_PKG_ENERGY_STAT 0xc001029b
+
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/*
@@ -187,6 +187,7 @@ struct core_data {
unsigned long long c7;
unsigned long long mc6_us; /* duplicate as per-core for now, even though per module */
unsigned int core_temp_c;
+ unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
unsigned int core_id;
unsigned long long counter[MAX_ADDED_COUNTERS];
} *core_even, *core_odd;
@@ -273,6 +274,7 @@ struct system_summary {
struct cpu_topology {
int physical_package_id;
+ int die_id;
int logical_cpu_id;
int physical_node_id;
int logical_node_id; /* 0-based count within the package */
@@ -283,6 +285,7 @@ struct cpu_topology {
struct topo_params {
int num_packages;
+ int num_die;
int num_cpus;
int num_cores;
int max_cpu_num;
@@ -314,9 +317,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
int retval, pkg_no, core_no, thread_no, node_no;
for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
- for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
- for (node_no = 0; node_no < topo.nodes_per_pkg;
- node_no++) {
+ for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+ for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
for (thread_no = 0; thread_no <
topo.threads_per_core; ++thread_no) {
struct thread_data *t;
@@ -442,6 +444,7 @@ struct msr_counter bic[] = {
{ 0x0, "CPU" },
{ 0x0, "APIC" },
{ 0x0, "X2APIC" },
+ { 0x0, "Die" },
};
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +498,7 @@ struct msr_counter bic[] = {
#define BIC_CPU (1ULL << 47)
#define BIC_APIC (1ULL << 48)
#define BIC_X2APIC (1ULL << 49)
+#define BIC_Die (1ULL << 50)
#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
@@ -621,6 +625,8 @@ void print_header(char *delim)
outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
if (DO_BIC(BIC_Package))
outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Die))
+ outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -667,7 +673,7 @@ void print_header(char *delim)
if (DO_BIC(BIC_CPU_c1))
outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+ if (DO_BIC(BIC_CPU_c3))
outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU_c6))
outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +686,14 @@ void print_header(char *delim)
if (DO_BIC(BIC_CoreTmp))
outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
+ if (do_rapl && !rapl_joules) {
+ if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+ } else if (do_rapl && rapl_joules) {
+ if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+ }
+
for (mp = sys.cp; mp; mp = mp->next) {
if (mp->format == FORMAT_RAW) {
if (mp->width == 64)
@@ -734,7 +748,7 @@ void print_header(char *delim)
if (do_rapl && !rapl_joules) {
if (DO_BIC(BIC_PkgWatt))
outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
- if (DO_BIC(BIC_CorWatt))
+ if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
if (DO_BIC(BIC_GFXWatt))
outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +761,7 @@ void print_header(char *delim)
} else if (do_rapl && rapl_joules) {
if (DO_BIC(BIC_Pkg_J))
outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
- if (DO_BIC(BIC_Cor_J))
+ if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
if (DO_BIC(BIC_GFX_J))
outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +822,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, "c6: %016llX\n", c->c6);
outp += sprintf(outp, "c7: %016llX\n", c->c7);
outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+ outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +919,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (t == &average.threads) {
if (DO_BIC(BIC_Package))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_Die))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Node))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_Core))
@@ -921,6 +938,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
else
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_Die)) {
+ if (c)
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+ else
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ }
if (DO_BIC(BIC_Node)) {
if (t)
outp += sprintf(outp, "%s%d",
@@ -1003,7 +1026,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+ if (DO_BIC(BIC_CPU_c3))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
if (DO_BIC(BIC_CPU_c6))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1056,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
}
}
+ /*
+ * If measurement interval exceeds minimum RAPL Joule Counter range,
+ * indicate that results are suspect by printing "**" in fraction place.
+ */
+ if (interval_float < rapl_joule_counter_range)
+ fmt8 = "%s%.2f";
+ else
+ fmt8 = "%6.0f**";
+
+ if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+ if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+ outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
/* print per-package data only for 1st core in package */
if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
goto done;
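
On AMD Fam17h parts the patch reads the per-core 32-bit MSR_CORE_ENERGY_STAT accumulator and prints CorWatt/Cor_J per core instead of per package (RAPL_PER_CORE_ENERGY). The conversion is the usual RAPL one: counter delta times the energy unit, divided by the measurement interval. A hedged, stand-alone sketch of that arithmetic using the names introduced above:

/* Convert two raw MSR_CORE_ENERGY_STAT samples to Watts.
 * The counter is 32 bits wide, so the unsigned 32-bit subtraction below
 * handles wrap-around the same way DELTA_WRAP32() does in the patch. */
static double core_watts(unsigned long long sample_before,
			 unsigned long long sample_after,
			 double rapl_energy_units, double interval_sec)
{
	unsigned int delta = (unsigned int)(sample_after - sample_before);

	return delta * rapl_energy_units / interval_sec;
}
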
@@ -1085,18 +1122,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
if (DO_BIC(BIC_SYS_LPI))
outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
- /*
- * If measurement interval exceeds minimum RAPL Joule Counter range,
- * indicate that results are suspect by printing "**" in fraction place.
- */
- if (interval_float < rapl_joule_counter_range)
- fmt8 = "%s%.2f";
- else
- fmt8 = "%6.0f**";
-
if (DO_BIC(BIC_PkgWatt))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
- if (DO_BIC(BIC_CorWatt))
+ if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
if (DO_BIC(BIC_GFXWatt))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1132,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
if (DO_BIC(BIC_Pkg_J))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
- if (DO_BIC(BIC_Cor_J))
+ if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
if (DO_BIC(BIC_GFX_J))
outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1277,8 @@ delta_core(struct core_data *new, struct core_data *old)
old->core_temp_c = new->core_temp_c;
old->mc6_us = new->mc6_us - old->mc6_us;
+ DELTA_WRAP32(new->core_energy, old->core_energy);
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
old->counter[i] = new->counter[i];
@@ -1391,6 +1421,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->c7 = 0;
c->mc6_us = 0;
c->core_temp_c = 0;
+ c->core_energy = 0;
p->pkg_wtd_core_c0 = 0;
p->pkg_any_core_c0 = 0;
@@ -1473,6 +1504,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
+ average.cores.core_energy += c->core_energy;
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW)
continue;
@@ -1818,7 +1851,7 @@ retry:
if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
goto done;
- if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+ if (DO_BIC(BIC_CPU_c3)) {
if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
return -6;
}
@@ -1845,6 +1878,12 @@ retry:
c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
}
+ if (do_rapl & RAPL_AMD_F17H) {
+ if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+ return -14;
+ c->core_energy = msr & 0xFFFFFFFF;
+ }
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (get_mp(cpu, mp, &c->counter[i]))
return -10;
@@ -1934,6 +1973,11 @@ retry:
return -16;
p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
}
+ if (do_rapl & RAPL_AMD_F17H) {
+ if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+ return -13;
+ p->energy_pkg = msr & 0xFFFFFFFF;
+ }
if (DO_BIC(BIC_PkgTmp)) {
if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
return -17;
@@ -2456,6 +2500,8 @@ void free_all_buffers(void)
/*
* Parse a file containing a single int.
+ * Return 0 if file can not be opened
+ * Exit if file can be opened, but can not be parsed
*/
int parse_int_file(const char *fmt, ...)
{
@@ -2467,7 +2513,9 @@ int parse_int_file(const char *fmt, ...)
va_start(args, fmt);
vsnprintf(path, sizeof(path), fmt, args);
va_end(args);
- filep = fopen_or_die(path, "r");
+ filep = fopen(path, "r");
+ if (!filep)
+ return 0;
if (fscanf(filep, "%d", &value) != 1)
err(1, "%s: failed to parse number from file", path);
fclose(filep);
@@ -2488,6 +2536,11 @@ int get_physical_package_id(int cpu)
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
}
+int get_die_id(int cpu)
+{
+ return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
int get_core_id(int cpu)
{
return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2631,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
filep = fopen_or_die(path, "r");
do {
offset -= BITMASK_SIZE;
- fscanf(filep, "%lx%c", &map, &character);
+ if (fscanf(filep, "%lx%c", &map, &character) != 2)
+ err(1, "%s: failed to parse file", path);
for (shift = 0; shift < BITMASK_SIZE; shift++) {
if ((map >> shift) & 0x1) {
so = shift + offset;
@@ -2855,8 +2909,11 @@ int snapshot_cpu_lpi_us(void)
fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
- if (retval != 1)
- err(1, "CPU LPI");
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+ BIC_NOT_PRESENT(BIC_CPU_LPI);
+ return -1;
+ }
fclose(fp);
@@ -2878,9 +2935,11 @@ int snapshot_sys_lpi_us(void)
fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
- if (retval != 1)
- err(1, "SYS LPI");
-
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle System output\n");
+ BIC_NOT_PRESENT(BIC_SYS_LPI);
+ return -1;
+ }
fclose(fp);
return 0;
@@ -3410,14 +3469,14 @@ dump_sysfs_cstate_config(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
if (!sp)
sp = strchrnul(name_buf, '\n');
*sp = '\0';
-
fclose(input);
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3484,8 @@ dump_sysfs_cstate_config(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(desc, sizeof(desc), input);
+ if (!fgets(desc, sizeof(desc), input))
+ err(1, "%s: failed to read file", path);
fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
fclose(input);
@@ -3444,20 +3504,22 @@ dump_sysfs_pstate_config(void)
base_cpu);
input = fopen(path, "r");
if (input == NULL) {
- fprintf(stderr, "NSFOD %s\n", path);
+ fprintf(outf, "NSFOD %s\n", path);
return;
}
- fgets(driver_buf, sizeof(driver_buf), input);
+ if (!fgets(driver_buf, sizeof(driver_buf), input))
+ err(1, "%s: failed to read file", path);
fclose(input);
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
base_cpu);
input = fopen(path, "r");
if (input == NULL) {
- fprintf(stderr, "NSFOD %s\n", path);
+ fprintf(outf, "NSFOD %s\n", path);
return;
}
- fgets(governor_buf, sizeof(governor_buf), input);
+ if (!fgets(governor_buf, sizeof(governor_buf), input))
+ err(1, "%s: failed to read file", path);
fclose(input);
fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3528,8 @@ dump_sysfs_pstate_config(void)
sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
input = fopen(path, "r");
if (input != NULL) {
- fscanf(input, "%d", &turbo);
+ if (fscanf(input, "%d", &turbo) != 1)
+ err(1, "%s: failed to parse number from file", path);
fprintf(outf, "cpufreq boost: %d\n", turbo);
fclose(input);
}
@@ -3474,7 +3537,8 @@ dump_sysfs_pstate_config(void)
sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
input = fopen(path, "r");
if (input != NULL) {
- fscanf(input, "%d", &turbo);
+ if (fscanf(input, "%d", &turbo) != 1)
+ err(1, "%s: failed to parse number from file", path);
fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
fclose(input);
}
@@ -3718,7 +3782,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
-double get_tdp(unsigned int model)
+double get_tdp_intel(unsigned int model)
{
unsigned long long msr;
@@ -3735,6 +3799,16 @@ double get_tdp(unsigned int model)
}
}
+double get_tdp_amd(unsigned int family)
+{
+ switch (family) {
+ case 0x17:
+ default:
+ /* This is the max stock TDP of HEDT/Server Fam17h chips */
+ return 250.0;
+ }
+}
+
/*
* rapl_dram_energy_units_probe()
* Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3828,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
}
}
-
-/*
- * rapl_probe()
- *
- * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
- */
-void rapl_probe(unsigned int family, unsigned int model)
+void rapl_probe_intel(unsigned int family, unsigned int model)
{
unsigned long long msr;
unsigned int time_unit;
double tdp;
- if (!genuine_intel)
- return;
-
if (family != 6)
return;
@@ -3892,13 +3957,69 @@ void rapl_probe(unsigned int family, unsigned int model)
rapl_time_units = 1.0 / (1 << (time_unit));
- tdp = get_tdp(model);
+ tdp = get_tdp_intel(model);
rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
if (!quiet)
fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
- return;
+void rapl_probe_amd(unsigned int family, unsigned int model)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int has_rapl = 0;
+ double tdp;
+
+ if (max_extended_level >= 0x80000007) {
+ __cpuid(0x80000007, eax, ebx, ecx, edx);
+ /* RAPL (Fam 17h) */
+ has_rapl = edx & (1 << 14);
+ }
+
+ if (!has_rapl)
+ return;
+
+ switch (family) {
+ case 0x17: /* Zen, Zen+ */
+ do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
+ if (rapl_joules) {
+ BIC_PRESENT(BIC_Pkg_J);
+ BIC_PRESENT(BIC_Cor_J);
+ } else {
+ BIC_PRESENT(BIC_PkgWatt);
+ BIC_PRESENT(BIC_CorWatt);
+ }
+ break;
+ default:
+ return;
+ }
+
+ if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
+ return;
+
+ rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
+ rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+ rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+ tdp = get_tdp_amd(model);
+
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+ if (!quiet)
+ fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
+}
+
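For a rough feel of what rapl_probe_amd() prints: assuming the commonly documented Fam17h energy status unit of 2^-16 J (an assumption, not a value taken from this patch) together with the 250 W TDP returned by get_tdp_amd(), the 32-bit energy counter covers roughly 262 seconds before wrapping. A back-of-the-envelope sketch (link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double rapl_energy_units = ldexp(1.0, -16);	/* assumed ESU: ~15.26 uJ */
	double tdp = 250.0;				/* get_tdp_amd() default */
	double range = 0xFFFFFFFF * rapl_energy_units / tdp;

	/* Prints roughly: RAPL: 262 sec. Joule Counter Range, at 250 Watts */
	printf("RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", range, tdp);
	return 0;
}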
+/*
+ * rapl_probe()
+ *
+ * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
+ */
+void rapl_probe(unsigned int family, unsigned int model)
+{
+ if (genuine_intel)
+ rapl_probe_intel(family, model);
+ if (authentic_amd)
+ rapl_probe_amd(family, model);
}
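The has_rapl test in rapl_probe_amd() keys off CPUID leaf 0x80000007, EDX bit 14. A hedged userspace sketch of the same check using the compiler's <cpuid.h> builtin (GCC/Clang helper, not turbostat code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0x80000007 not supported\n");
		return 1;
	}
	/* EDX bit 14: RAPL reporting on AMD Fam17h, per the probe above */
	printf("RAPL supported: %s\n", (edx & (1 << 14)) ? "yes" : "no");
	return 0;
}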
void perf_limit_reasons_probe(unsigned int family, unsigned int model)
@@ -4003,6 +4124,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned long long msr;
+ const char *msr_name;
int cpu;
if (!do_rapl)
@@ -4018,10 +4140,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return -1;
}
- if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
- return -1;
+ if (do_rapl & RAPL_AMD_F17H) {
+ msr_name = "MSR_RAPL_PWR_UNIT";
+ if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
+ return -1;
+ } else {
+ msr_name = "MSR_RAPL_POWER_UNIT";
+ if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
+ return -1;
+ }
- fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr,
+ fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
rapl_power_units, rapl_energy_units, rapl_time_units);
if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4580,9 @@ unsigned int intel_model_duplicates(unsigned int model)
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
return INTEL_FAM6_SKYLAKE_MOBILE;
+
+ case INTEL_FAM6_ICELAKE_MOBILE:
+ return INTEL_FAM6_CANNONLAKE_MOBILE;
}
return model;
}
@@ -4702,7 +4834,9 @@ void process_cpuid()
}
do_slm_cstates = is_slm(family, model);
do_knl_cstates = is_knl(family, model);
- do_cnl_cstates = is_cnl(family, model);
+
+ if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+ BIC_NOT_PRESENT(BIC_CPU_c3);
if (!quiet)
decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4903,7 @@ void topology_probe()
int i;
int max_core_id = 0;
int max_package_id = 0;
+ int max_die_id = 0;
int max_siblings = 0;
/* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4970,11 @@ void topology_probe()
if (cpus[i].physical_package_id > max_package_id)
max_package_id = cpus[i].physical_package_id;
+ /* get die information */
+ cpus[i].die_id = get_die_id(i);
+ if (cpus[i].die_id > max_die_id)
+ max_die_id = cpus[i].die_id;
+
/* get numa node information */
cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5000,13 @@ void topology_probe()
if (!summary_only && topo.cores_per_node > 1)
BIC_PRESENT(BIC_Core);
+ topo.num_die = max_die_id + 1;
+ if (debug > 1)
+ fprintf(outf, "max_die_id %d, sizing for %d die\n",
+ max_die_id, topo.num_die);
+ if (!summary_only && topo.num_die > 1)
+ BIC_PRESENT(BIC_Die);
+
topo.num_packages = max_package_id + 1;
if (debug > 1)
fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5031,8 @@ void topology_probe()
if (cpu_is_not_present(i))
continue;
fprintf(outf,
- "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
- i, cpus[i].physical_package_id,
+ "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
+ i, cpus[i].physical_package_id, cpus[i].die_id,
cpus[i].physical_node_id,
cpus[i].logical_node_id,
cpus[i].physical_core_id,
@@ -5077,6 +5224,9 @@ int fork_it(char **argv)
signal(SIGQUIT, SIG_IGN);
if (waitpid(child_pid, &status, 0) == -1)
err(status, "waitpid");
+
+ if (WIFEXITED(status))
+ status = WEXITSTATUS(status);
}
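The added WIFEXITED()/WEXITSTATUS() handling matters because waitpid() hands back an encoded status word, not the child's exit code. A tiny standalone illustration (not turbostat code):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status = 0;
	pid_t pid = fork();

	if (pid < 0)
		return 1;
	if (pid == 0)
		exit(3);	/* child exits with code 3 */

	waitpid(pid, &status, 0);
	/* Typically prints: raw status 0x300, exit code 3 */
	printf("raw status 0x%x, exit code %d\n",
	       status, WIFEXITED(status) ? WEXITSTATUS(status) : -1);
	return 0;
}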
/*
* n.b. fork_it() does not check for errors from for_all_cpus()
@@ -5119,7 +5269,7 @@ int get_and_dump_counters(void)
}
void print_version() {
- fprintf(outf, "turbostat version 18.07.27"
+ fprintf(outf, "turbostat version 19.03.20"
" - Len Brown <lenb@kernel.org>\n");
}
@@ -5316,7 +5466,8 @@ void probe_sysfs(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
@@ -5343,7 +5494,8 @@ void probe_sysfs(void)
input = fopen(path, "r");
if (input == NULL)
continue;
- fgets(name_buf, sizeof(name_buf), input);
+ if (!fgets(name_buf, sizeof(name_buf), input))
+ err(1, "%s: failed to read file", path);
/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
sp = strchr(name_buf, '-');
if (!sp)
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index ae7a0e09b722..1fdeef864e7c 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,7 +9,7 @@ ifeq ("$(origin O)", "command line")
endif
x86_energy_perf_policy : x86_energy_perf_policy.c
-override CFLAGS += -Wall
+override CFLAGS += -Wall -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
%: %.c
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 65bbe627a425..34a796b303fe 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* x86_energy_perf_policy -- set the energy versus performance
* policy preference bias on recent X86 processors.
@@ -5,8 +6,6 @@
/*
* Copyright (c) 2010 - 2017 Intel Corporation.
* Len Brown <len.brown@intel.com>
- *
- * This program is released under GPL v2
*/
#define _GNU_SOURCE
diff --git a/tools/spi/Makefile b/tools/spi/Makefile
index 815d15589177..5c342e655e55 100644
--- a/tools/spi/Makefile
+++ b/tools/spi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
include ../scripts/Makefile.include
bindir ?= /usr/bin
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index 4c12e6aea5d5..3559e7646256 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SPI testing utility (using spidev driver)
*
* Copyright (c) 2007 MontaVista Software, Inc.
* Copyright (c) 2007 Anton Vorontsov <avorontsov@ru.mvista.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
* Cross-compile with cross-gcc -I/path/to/cross-kernel/include
*/
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
index b28feea7c363..72525426654b 100755
--- a/tools/testing/ktest/config-bisect.pl
+++ b/tools/testing/ktest/config-bisect.pl
@@ -1,10 +1,9 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright 2015 - Steven Rostedt, Red Hat Inc.
# Copyright 2017 - Steven Rostedt, VMware, Inc.
#
-# Licensed under the terms of the GNU GPL License version 2
-#
# usage:
# config-bisect.pl [options] good-config bad-config [good|bad]
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 87af8a68ab25..220d04f958a6 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -1,7 +1,7 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
-# Licensed under the terms of the GNU GPL License version 2
#
use strict;
@@ -58,11 +58,13 @@ my %default = (
"SCP_TO_TARGET" => "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE",
"SCP_TO_TARGET_INSTALL" => "\${SCP_TO_TARGET}",
"REBOOT" => "ssh \$SSH_USER\@\$MACHINE reboot",
+ "REBOOT_RETURN_CODE" => 255,
"STOP_AFTER_SUCCESS" => 10,
"STOP_AFTER_FAILURE" => 60,
"STOP_TEST_AFTER" => 600,
"MAX_MONITOR_WAIT" => 1800,
"GRUB_REBOOT" => "grub2-reboot",
+ "GRUB_BLS_GET" => "grubby --info=ALL",
"SYSLINUX" => "extlinux",
"SYSLINUX_PATH" => "/boot/extlinux",
"CONNECT_TIMEOUT" => 25,
@@ -105,6 +107,7 @@ my $reboot_type;
my $reboot_script;
my $power_cycle;
my $reboot;
+my $reboot_return_code;
my $reboot_on_error;
my $switch_to_good;
my $switch_to_test;
@@ -123,6 +126,7 @@ my $last_grub_menu;
my $grub_file;
my $grub_number;
my $grub_reboot;
+my $grub_bls_get;
my $syslinux;
my $syslinux_path;
my $syslinux_label;
@@ -278,6 +282,7 @@ my %option_map = (
"POST_BUILD_DIE" => \$post_build_die,
"POWER_CYCLE" => \$power_cycle,
"REBOOT" => \$reboot,
+ "REBOOT_RETURN_CODE" => \$reboot_return_code,
"BUILD_NOCLEAN" => \$noclean,
"MIN_CONFIG" => \$minconfig,
"OUTPUT_MIN_CONFIG" => \$output_minconfig,
@@ -292,6 +297,7 @@ my %option_map = (
"GRUB_MENU" => \$grub_menu,
"GRUB_FILE" => \$grub_file,
"GRUB_REBOOT" => \$grub_reboot,
+ "GRUB_BLS_GET" => \$grub_bls_get,
"SYSLINUX" => \$syslinux,
"SYSLINUX_PATH" => \$syslinux_path,
"SYSLINUX_LABEL" => \$syslinux_label,
@@ -437,7 +443,7 @@ EOF
;
$config_help{"REBOOT_TYPE"} = << "EOF"
Way to reboot the box to the test kernel.
- Only valid options so far are "grub", "grub2", "syslinux", and "script".
+ Only valid options so far are "grub", "grub2", "grub2bls", "syslinux", and "script".
If you specify grub, it will assume grub version 1
and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
@@ -451,6 +457,8 @@ $config_help{"REBOOT_TYPE"} = << "EOF"
If you specify grub2, then you also need to specify both \$GRUB_MENU
and \$GRUB_FILE.
+ If you specify grub2bls, then you also need to specify \$GRUB_MENU.
+
If you specify syslinux, then you may use SYSLINUX to define the syslinux
command (defaults to extlinux), and SYSLINUX_PATH to specify the path to
the syslinux install (defaults to /boot/extlinux). But you have to specify
@@ -476,6 +484,9 @@ $config_help{"GRUB_MENU"} = << "EOF"
menu must be a non-nested menu. Add the quotes used in the menu
to guarantee your selection, as the first menuentry with the content
of \$GRUB_MENU that is found will be used.
+
+ For grub2bls, \$GRUB_MENU is searched for in the output of the
+ \$GRUB_BLS_GET command, in the lines that begin with "title".
EOF
;
$config_help{"GRUB_FILE"} = << "EOF"
@@ -692,7 +703,7 @@ sub get_mandatory_configs {
}
}
- if ($rtype eq "grub") {
+ if (($rtype eq "grub") or ($rtype eq "grub2bls")) {
get_mandatory_config("GRUB_MENU");
}
@@ -1437,16 +1448,27 @@ sub do_not_reboot {
my $in_die = 0;
+sub get_test_name() {
+ my $name;
+
+ if (defined($test_name)) {
+ $name = "$test_name:$test_type";
+ } else {
+ $name = $test_type;
+ }
+ return $name;
+}
+
sub dodie {
# avoid recursion
return if ($in_die);
$in_die = 1;
- doprint "CRITICAL FAILURE... ", @_, "\n";
-
my $i = $iteration;
+ doprint "CRITICAL FAILURE... [TEST $i] ", @_, "\n";
+
if ($reboot_on_error && !do_not_reboot) {
doprint "REBOOTING\n";
@@ -1462,7 +1484,8 @@ sub dodie {
}
if ($email_on_error) {
- send_email("KTEST: critical failure for your [$test_type] test",
+ my $name = get_test_name;
+ send_email("KTEST: critical failure for test $i [$name]",
"Your test started at $script_start_time has failed with:\n@_\n");
}
@@ -1737,6 +1760,7 @@ sub run_command {
my $dord = 0;
my $dostdout = 0;
my $pid;
+ my $command_orig = $command;
$command =~ s/\$SSH_USER/$ssh_user/g;
$command =~ s/\$MACHINE/$machine/g;
@@ -1791,6 +1815,11 @@ sub run_command {
# shift 8 for real exit status
$run_command_status = $? >> 8;
+ if ($command_orig eq $default{REBOOT} &&
+ $run_command_status == $reboot_return_code) {
+ $run_command_status = 0;
+ }
+
close(CMD);
close(LOG) if ($dolog);
close(RD) if ($dord);
@@ -1850,35 +1879,37 @@ sub run_scp_mod {
return run_scp($src, $dst, $cp_scp);
}
-sub get_grub2_index {
+sub _get_grub_index {
+
+ my ($command, $target, $skip) = @_;
return if (defined($grub_number) && defined($last_grub_menu) &&
$last_grub_menu eq $grub_menu && defined($last_machine) &&
$last_machine eq $machine);
- doprint "Find grub2 menu ... ";
+ doprint "Find $reboot_type menu ... ";
$grub_number = -1;
my $ssh_grub = $ssh_exec;
- $ssh_grub =~ s,\$SSH_COMMAND,cat $grub_file,g;
+ $ssh_grub =~ s,\$SSH_COMMAND,$command,g;
open(IN, "$ssh_grub |")
- or dodie "unable to get $grub_file";
+ or dodie "unable to execute $command";
my $found = 0;
while (<IN>) {
- if (/^menuentry.*$grub_menu/) {
+ if (/$target/) {
$grub_number++;
$found = 1;
last;
- } elsif (/^menuentry\s|^submenu\s/) {
+ } elsif (/$skip/) {
$grub_number++;
}
}
close(IN);
- dodie "Could not find '$grub_menu' in $grub_file on $machine"
+ dodie "Could not find '$grub_menu' through $command on $machine"
if (!$found);
doprint "$grub_number\n";
$last_grub_menu = $grub_menu;
@@ -1887,45 +1918,34 @@ sub get_grub2_index {
sub get_grub_index {
- if ($reboot_type eq "grub2") {
- get_grub2_index;
- return;
- }
+ my $command;
+ my $target;
+ my $skip;
+ my $grub_menu_qt;
- if ($reboot_type ne "grub") {
+ if ($reboot_type !~ /^grub/) {
return;
}
- return if (defined($grub_number) && defined($last_grub_menu) &&
- $last_grub_menu eq $grub_menu && defined($last_machine) &&
- $last_machine eq $machine);
- doprint "Find grub menu ... ";
- $grub_number = -1;
-
- my $ssh_grub = $ssh_exec;
- $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
+ $grub_menu_qt = quotemeta($grub_menu);
- open(IN, "$ssh_grub |")
- or dodie "unable to get menu.lst";
-
- my $found = 0;
-
- while (<IN>) {
- if (/^\s*title\s+$grub_menu\s*$/) {
- $grub_number++;
- $found = 1;
- last;
- } elsif (/^\s*title\s/) {
- $grub_number++;
- }
+ if ($reboot_type eq "grub") {
+ $command = "cat /boot/grub/menu.lst";
+ $target = '^\s*title\s+' . $grub_menu_qt . '\s*$';
+ $skip = '^\s*title\s';
+ } elsif ($reboot_type eq "grub2") {
+ $command = "cat $grub_file";
+ $target = '^menuentry.*' . $grub_menu_qt;
+ $skip = '^menuentry\s|^submenu\s';
+ } elsif ($reboot_type eq "grub2bls") {
+ $command = $grub_bls_get;
+ $target = '^title=.*' . $grub_menu_qt;
+ $skip = '^title=';
+ } else {
+ return;
}
- close(IN);
- dodie "Could not find '$grub_menu' in /boot/grub/menu on $machine"
- if (!$found);
- doprint "$grub_number\n";
- $last_grub_menu = $grub_menu;
- $last_machine = $machine;
+ _get_grub_index($command, $target, $skip);
}
sub wait_for_input
@@ -4193,7 +4213,8 @@ sub send_email {
sub cancel_test {
if ($email_when_canceled) {
- send_email("KTEST: Your [$test_type] test was cancelled",
+ my $name = get_test_name;
+ send_email("KTEST: Your [$name] test was cancelled",
"Your test started at $script_start_time was cancelled: sig int");
}
die "\nCaught Sig Int, test interrupted: $!\n"
@@ -4247,7 +4268,8 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
run_command $pre_ktest;
}
if ($email_when_started) {
- send_email("KTEST: Your [$test_type] test was started",
+ my $name = get_test_name;
+ send_email("KTEST: Your [$name] test was started",
"Your test was started on $script_start_time");
}
}
@@ -4278,7 +4300,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
if (!$buildonly) {
$target = "$ssh_user\@$machine";
- if ($reboot_type eq "grub") {
+ if (($reboot_type eq "grub") or ($reboot_type eq "grub2bls")) {
dodie "GRUB_MENU not defined" if (!defined($grub_menu));
} elsif ($reboot_type eq "grub2") {
dodie "GRUB_MENU not defined" if (!defined($grub_menu));
@@ -4398,7 +4420,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
}
if (defined($final_post_ktest)) {
- run_command $final_post_ktest;
+
+ my $cp_final_post_ktest = eval_kernel_version $final_post_ktest;
+ run_command $cp_final_post_ktest;
}
if ($opt{"POWEROFF_ON_SUCCESS"}) {
@@ -4414,7 +4438,7 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) {
doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n";
if ($email_when_finished) {
- send_email("KTEST: Your [$test_type] test has finished!",
+ send_email("KTEST: Your test has finished!",
"$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!");
}
exit 0;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 6ca6ca0ce695..c3bc933d437b 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -349,13 +349,13 @@
# option to boot to with GRUB_REBOOT
#GRUB_FILE = /boot/grub2/grub.cfg
-# The tool for REBOOT_TYPE = grub2 to set the next reboot kernel
+# The tool for REBOOT_TYPE = grub2 or grub2bls to set the next reboot kernel
# to boot into (one shot mode).
# (default grub2_reboot)
#GRUB_REBOOT = grub2_reboot
# The grub title name for the test kernel to boot
-# (Only mandatory if REBOOT_TYPE = grub or grub2)
+# (Only mandatory if REBOOT_TYPE = grub or grub2 or grub2bls)
#
# Note, ktest.pl will not update the grub menu.lst, you need to
# manually add an option for the test. ktest.pl will search
@@ -374,6 +374,10 @@
# do a: GRUB_MENU = 'Test Kernel'
# For customizing, add your entry in /etc/grub.d/40_custom.
#
+# For grub2bls, the "title" lines are searched. The menu is found
+# by searching for the contents of GRUB_MENU in the lines that start
+# with "title".
+#
#GRUB_MENU = Test Kernel
# For REBOOT_TYPE = syslinux, the name of the syslinux executable
@@ -479,6 +483,11 @@
# default (undefined)
#POST_KTEST = ${SSH} ~/dismantle_test
+# If you want to remove the kernel entry in a Boot Loader Specification (BLS)
+# environment, use the kernel-install command.
+# Here's an example:
+#POST_KTEST = ssh root@Test "/usr/bin/kernel-install remove $KERNEL_VERSION"
+
# The default test type (default test)
# The test types may be:
# build - only build the kernel, do nothing else
@@ -530,6 +539,11 @@
# or on some systems:
#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+# If you want to add the kernel entry in a Boot Loader Specification (BLS)
+# environment, use the kernel-install command.
+# Here's an example:
+#POST_INSTALL = ssh root@Test "/usr/bin/kernel-install add $KERNEL_VERSION /boot/vmlinuz-$KERNEL_VERSION"
+
# If for some reason you just want to boot the kernel and you do not
# want the test to install anything new. For example, you may just want
# to boot test the same kernel over and over and do not want to go through
@@ -593,6 +607,8 @@
# For REBOOT_TYPE = grub2, you must define both GRUB_MENU and
# GRUB_FILE.
#
+# For REBOOT_TYPE = grub2bls, you must define GRUB_MENU.
+#
# For REBOOT_TYPE = syslinux, you must define SYSLINUX_LABEL, and
# perhaps modify SYSLINUX (default extlinux) and SYSLINUX_PATH
# (default /boot/extlinux)
@@ -887,6 +903,10 @@
# The variables SSH_USER and MACHINE are defined.
#REBOOT = ssh $SSH_USER@$MACHINE reboot
+# The return code of REBOOT
+# (default 255)
+#REBOOT_RETURN_CODE = 255
+
# The way triple faults are detected is by testing the kernel
# banner. If the kernel banner for the kernel we are testing is
# found, and then later a kernel banner for another kernel version
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 10ddf223055b..c4a9196d794c 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -35,6 +35,8 @@ obj-$(CONFIG_DAX) += dax.o
endif
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
+obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
nfit-y := $(ACPI_SRC)/core.o
nfit-y += $(ACPI_SRC)/intel.o
@@ -57,6 +59,7 @@ nd_e820-y := $(NVDIMM_SRC)/e820.o
nd_e820-y += config_check.o
dax-y := $(DAX_SRC)/super.o
+dax-y += $(DAX_SRC)/bus.o
dax-y += config_check.o
device_dax-y := $(DAX_SRC)/device.o
@@ -64,7 +67,12 @@ device_dax-y += dax-dev.o
device_dax-y += device_dax_test.o
device_dax-y += config_check.o
-dax_pmem-y := $(DAX_SRC)/pmem.o
+dax_pmem-y := $(DAX_SRC)/pmem/pmem.o
+dax_pmem-y += dax_pmem_test.o
+dax_pmem_core-y := $(DAX_SRC)/pmem/core.o
+dax_pmem_core-y += dax_pmem_core_test.o
+dax_pmem_compat-y := $(DAX_SRC)/pmem/compat.o
+dax_pmem_compat-y += dax_pmem_compat_test.o
dax_pmem-y += config_check.o
libnvdimm-y := $(NVDIMM_SRC)/core.o
diff --git a/tools/testing/nvdimm/dax-dev.c b/tools/testing/nvdimm/dax-dev.c
index 36ee3d8797c3..7e5d979e73cb 100644
--- a/tools/testing/nvdimm/dax-dev.c
+++ b/tools/testing/nvdimm/dax-dev.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include "test/nfit_test.h"
#include <linux/mm.h>
@@ -17,20 +9,11 @@
phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
- struct resource *res;
+ struct resource *res = &dev_dax->region->res;
phys_addr_t addr;
- int i;
- for (i = 0; i < dev_dax->num_resources; i++) {
- res = &dev_dax->res[i];
- addr = pgoff * PAGE_SIZE + res->start;
- if (addr >= res->start && addr <= res->end)
- break;
- pgoff -= PHYS_PFN(resource_size(res));
- }
-
- if (i < dev_dax->num_resources) {
- res = &dev_dax->res[i];
+ addr = pgoff * PAGE_SIZE + res->start;
+ if (addr >= res->start && addr <= res->end) {
if (addr + size - 1 <= res->end) {
if (get_nfit_res(addr)) {
struct page *page;
@@ -44,6 +27,5 @@ phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return addr;
}
}
-
return -1;
}
diff --git a/tools/testing/nvdimm/dax_pmem_compat_test.c b/tools/testing/nvdimm/dax_pmem_compat_test.c
new file mode 100644
index 000000000000..7cd1877f3765
--- /dev/null
+++ b/tools/testing/nvdimm/dax_pmem_compat_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(dax_pmem_compat);
diff --git a/tools/testing/nvdimm/dax_pmem_core_test.c b/tools/testing/nvdimm/dax_pmem_core_test.c
new file mode 100644
index 000000000000..a4249cdbeec1
--- /dev/null
+++ b/tools/testing/nvdimm/dax_pmem_core_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(dax_pmem_core);
diff --git a/tools/testing/nvdimm/dax_pmem_test.c b/tools/testing/nvdimm/dax_pmem_test.c
new file mode 100644
index 000000000000..fd4c94a5aa02
--- /dev/null
+++ b/tools/testing/nvdimm/dax_pmem_test.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include "watermark.h"
+
+nfit_test_watermark(dax_pmem);
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index 2e7fd8227969..af19c85558e7 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include "test/nfit_test.h"
#include <linux/blkdev.h>
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c6635fee27d8..076df22e4bda 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/memremap.h>
#include <linux/rculist.h>
@@ -108,7 +100,9 @@ static void nfit_test_kill(void *_pgmap)
{
struct dev_pagemap *pgmap = _pgmap;
+ WARN_ON(!pgmap || !pgmap->ref || !pgmap->kill || !pgmap->cleanup);
pgmap->kill(pgmap->ref);
+ pgmap->cleanup(pgmap->ref);
}
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index b579f962451d..507e6f4cbb53 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
@@ -146,6 +138,7 @@ static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
struct nfit_test_sec {
u8 state;
u8 ext_state;
+ u8 old_state;
u8 passphrase[32];
u8 master_passphrase[32];
u64 overwrite_end_time;
@@ -225,6 +218,8 @@ static struct workqueue_struct *nfit_wq;
static struct gen_pool *nfit_pool;
+static const char zero_key[NVDIMM_PASSPHRASE_LEN];
+
static struct nfit_test *to_nfit_test(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1059,8 +1054,7 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
struct device *dev = &t->pdev.dev;
struct nfit_test_sec *sec = &dimm_sec_info[dimm];
- if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED) ||
- (sec->state & ND_INTEL_SEC_STATE_FROZEN)) {
+ if (sec->state & ND_INTEL_SEC_STATE_FROZEN) {
nd_cmd->status = ND_INTEL_STATUS_INVALID_STATE;
dev_dbg(dev, "secure erase: wrong security state\n");
} else if (memcmp(nd_cmd->passphrase, sec->passphrase,
@@ -1068,6 +1062,12 @@ static int nd_intel_test_cmd_secure_erase(struct nfit_test *t,
nd_cmd->status = ND_INTEL_STATUS_INVALID_PASS;
dev_dbg(dev, "secure erase: wrong passphrase\n");
} else {
+ if (!(sec->state & ND_INTEL_SEC_STATE_ENABLED)
+ && (memcmp(nd_cmd->passphrase, zero_key,
+ ND_INTEL_PASSPHRASE_SIZE) != 0)) {
+ dev_dbg(dev, "invalid zero key\n");
+ return 0;
+ }
memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
memset(sec->master_passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
sec->state = 0;
@@ -1093,7 +1093,7 @@ static int nd_intel_test_cmd_overwrite(struct nfit_test *t,
return 0;
}
- memset(sec->passphrase, 0, ND_INTEL_PASSPHRASE_SIZE);
+ sec->old_state = sec->state;
sec->state = ND_INTEL_SEC_STATE_OVERWRITE;
dev_dbg(dev, "overwrite progressing.\n");
sec->overwrite_end_time = get_jiffies_64() + 5 * HZ;
@@ -1115,7 +1115,8 @@ static int nd_intel_test_cmd_query_overwrite(struct nfit_test *t,
if (time_is_before_jiffies64(sec->overwrite_end_time)) {
sec->overwrite_end_time = 0;
- sec->state = 0;
+ sec->state = sec->old_state;
+ sec->old_state = 0;
sec->ext_state = ND_INTEL_SEC_ESTATE_ENABLED;
dev_dbg(dev, "overwrite is complete\n");
} else
@@ -3162,6 +3163,9 @@ static __init int nfit_test_init(void)
acpi_nfit_test();
device_dax_test();
mcsafe_test();
+ dax_pmem_test();
+ dax_pmem_core_test();
+ dax_pmem_compat_test();
nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index ade14fe3837e..448d686da8b1 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef __NFIT_TEST_H__
#define __NFIT_TEST_H__
diff --git a/tools/testing/nvdimm/watermark.h b/tools/testing/nvdimm/watermark.h
index ed0528757bd4..43fc4f3e7927 100644
--- a/tools/testing/nvdimm/watermark.h
+++ b/tools/testing/nvdimm/watermark.h
@@ -6,6 +6,9 @@ int pmem_test(void);
int libnvdimm_test(void);
int acpi_nfit_test(void);
int device_dax_test(void);
+int dax_pmem_test(void);
+int dax_pmem_core_test(void);
+int dax_pmem_compat_test(void);
/*
* dummy routine for nfit_test to validate it is linking to the properly
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
index 7e195ed8e92d..523c79f22ed3 100644
--- a/tools/testing/radix-tree/benchmark.c
+++ b/tools/testing/radix-tree/benchmark.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* benchmark.c:
* Author: Konstantin Khlebnikov <koct9i@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/radix-tree.h>
#include <linux/slab.h>
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 1b63bdb7688f..8995092d541e 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* idr-test.c: Test the IDR API
* Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/bitmap.h>
#include <linux/idr.h>
@@ -287,6 +279,51 @@ static void idr_align_test(struct idr *idr)
}
}
+DEFINE_IDR(find_idr);
+
+static void *idr_throbber(void *arg)
+{
+ time_t start = time(NULL);
+ int id = *(int *)arg;
+
+ rcu_register_thread();
+ do {
+ idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL);
+ idr_remove(&find_idr, id);
+ } while (time(NULL) < start + 10);
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+void idr_find_test_1(int anchor_id, int throbber_id)
+{
+ pthread_t throbber;
+ time_t start = time(NULL);
+
+ pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+ BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
+ anchor_id + 1, GFP_KERNEL) != anchor_id);
+
+ do {
+ int id = 0;
+ void *entry = idr_get_next(&find_idr, &id);
+ BUG_ON(entry != xa_mk_value(id));
+ } while (time(NULL) < start + 11);
+
+ pthread_join(throbber, NULL);
+
+ idr_remove(&find_idr, anchor_id);
+ BUG_ON(!idr_is_empty(&find_idr));
+}
+
+void idr_find_test(void)
+{
+ idr_find_test_1(100000, 0);
+ idr_find_test_1(0, 100000);
+}
+
void idr_checks(void)
{
unsigned long i;
@@ -368,6 +405,7 @@ void idr_checks(void)
idr_u32_test(1);
idr_u32_test(0);
idr_align_test(&idr);
+ idr_find_test();
}
#define module_init(x)
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
index 238db187aa15..e9908bcb06dd 100644
--- a/tools/testing/radix-tree/iteration_check.c
+++ b/tools/testing/radix-tree/iteration_check.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* iteration_check.c: test races having to do with xarray iteration
* Copyright (c) 2016 Intel Corporation
* Author: Ross Zwisler <ross.zwisler@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <pthread.h>
#include "test.h"
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index ff27a74d9762..9eae0fb5a67d 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* multiorder.c: Multi-order radix tree entry testing
* Copyright (c) 2016 Intel Corporation
* Author: Ross Zwisler <ross.zwisler@linux.intel.com>
* Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/radix-tree.h>
#include <linux/slab.h>
diff --git a/tools/testing/scatterlist/Makefile b/tools/testing/scatterlist/Makefile
index 933c3a6e4d77..cbb003d9305e 100644
--- a/tools/testing/scatterlist/Makefile
+++ b/tools/testing/scatterlist/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
index 91750352459d..8059ce834247 100644
--- a/tools/testing/selftests/.gitignore
+++ b/tools/testing/selftests/.gitignore
@@ -1,4 +1,3 @@
-kselftest
gpiogpio-event-mon
gpiogpio-hammer
gpioinclude/
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 400ee81a3043..9781ca79794a 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -15,13 +15,14 @@ TARGETS += firmware
TARGETS += ftrace
TARGETS += futex
TARGETS += gpio
-TARGETS += ima
TARGETS += intel_pstate
TARGETS += ipc
TARGETS += ir
TARGETS += kcmp
+TARGETS += kexec
TARGETS += kvm
TARGETS += lib
+TARGETS += livepatch
TARGETS += membarrier
TARGETS += memfd
TARGETS += memory-hotplug
@@ -31,6 +32,7 @@ TARGETS += net
TARGETS += netfilter
TARGETS += networking/timestamping
TARGETS += nsfs
+TARGETS += pidfd
TARGETS += powerpc
TARGETS += proc
TARGETS += pstore
@@ -48,6 +50,8 @@ TARGETS += sysctl
ifneq (1, $(quicktest))
TARGETS += timers
endif
+TARGETS += tmpfs
+TARGETS += tpm2
TARGETS += user
TARGETS += vm
TARGETS += x86
@@ -67,26 +71,68 @@ override LDFLAGS =
override MAKEFLAGS =
endif
+# Append kselftest to KBUILD_OUTPUT to avoid cluttering
+# KBUILD_OUTPUT with selftest objects and headers installed
+# by selftests Makefile or lib.mk.
ifneq ($(KBUILD_SRC),)
override LDFLAGS =
endif
-BUILD := $(O)
-ifndef BUILD
- BUILD := $(KBUILD_OUTPUT)
+ifneq ($(O),)
+ BUILD := $(O)
+else
+ ifneq ($(KBUILD_OUTPUT),)
+ BUILD := $(KBUILD_OUTPUT)/kselftest
+ else
+ BUILD := $(shell pwd)
+ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
endif
-ifndef BUILD
- BUILD := $(shell pwd)
+
+# Prepare for headers install
+top_srcdir ?= ../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
+export KSFT_KHDR_INSTALL_DONE := 1
+export BUILD
+
+# Build and run gpio only when the output directory is the src dir.
+# gpio depends on tools/gpio and builds tools/gpio objects in the
+# src directory in all cases, making the src repo dirty even when
+# objects are relocated.
+ifneq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ TMP := $(filter-out gpio, $(TARGETS))
+ TARGETS := $(TMP)
endif
-# KSFT_TAP_LEVEL is used from KSFT framework to prevent nested TAP header
-# printing from tests. Applicable to run_tests case where run_tests adds
-# TAP header prior running tests and when a test program invokes another
-# with system() call. Export it here to cover override RUN_TESTS defines.
-export KSFT_TAP_LEVEL=`echo 1`
+# set default goal to all, so make without a target runs all, even when
+# all isn't the first target in the file.
+.DEFAULT_GOAL := all
+
+# Install headers here once for all tests. KSFT_KHDR_INSTALL_DONE
+# is used to avoid running headers_install from lib.mk.
+# Invoke headers install with --no-builtin-rules to avoid circular
+# dependency in "make kselftest" case. In this case, second level
+# make inherits builtin-rules which will use the rule generate
+# Makefile.o and runs into
+# "Circular Makefile.o <- prepare dependency dropped."
+# and headers_install fails and test compile fails.
+#
+# O= KBUILD_OUTPUT cases don't run into this error, since main Makefile
+# invokes them as sub-makes and --no-builtin-rules is not necessary,
+# but doesn't cause any failures. Keep it simple and use the same
+# flags in both cases.
+# Local build cases: "make kselftest", "make -C" - headers are installed
+# in the default INSTALL_HDR_PATH usr/include.
+khdr:
+ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+else
+ make --no-builtin-rules INSTALL_HDR_PATH=$$BUILD/usr \
+ ARCH=$(ARCH) -C $(top_srcdir) headers_install
+endif
-export BUILD
-all:
+all: khdr
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
@@ -120,14 +166,22 @@ clean_hotplug:
run_pstore_crash:
make -C pstore run_crash
-INSTALL_PATH ?= install
+# Use $BUILD as the default install root. $BUILD points to the
+# right output location for the following cases:
+# 1. output_dir=kernel_src
+# 2. a separate output directory is specified using O= KBUILD_OUTPUT
+# 3. a separate output directory is specified using KBUILD_OUTPUT
+#
+INSTALL_PATH ?= $(BUILD)/install
INSTALL_PATH := $(abspath $(INSTALL_PATH))
ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
-install:
+install: all
ifdef INSTALL_PATH
@# Ask all targets to install their files
- mkdir -p $(INSTALL_PATH)
+ mkdir -p $(INSTALL_PATH)/kselftest
+ install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
+ install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
@@ -137,24 +191,20 @@ ifdef INSTALL_PATH
echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "BASE_DIR=\$$(realpath \$$(dirname \$$0))" >> $(ALL_SCRIPT)
echo "cd \$$BASE_DIR" >> $(ALL_SCRIPT)
+ echo ". ./kselftest/runner.sh" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
echo "if [ \"\$$1\" = \"--summary\" ]; then" >> $(ALL_SCRIPT)
- echo " OUTPUT=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
- echo " cat /dev/null > \$$OUTPUT" >> $(ALL_SCRIPT)
- echo "else" >> $(ALL_SCRIPT)
- echo " OUTPUT=/dev/stdout" >> $(ALL_SCRIPT)
+ echo " logfile=\$$BASE_DIR/output.log" >> $(ALL_SCRIPT)
+ echo " cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
echo "fi" >> $(ALL_SCRIPT)
- echo "export KSFT_TAP_LEVEL=1" >> $(ALL_SCRIPT)
- echo "export skip=4" >> $(ALL_SCRIPT)
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- echo "echo ; echo TAP version 13" >> $(ALL_SCRIPT); \
- echo "echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
- echo "echo ========================================" >> $(ALL_SCRIPT); \
echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ echo -n "run_many" >> $(ALL_SCRIPT); \
make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ echo "" >> $(ALL_SCRIPT); \
echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
done;
@@ -169,4 +219,4 @@ clean:
make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
+.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 72c25a3cb658..7c462714b418 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
SUBDIRS := ion
TEST_PROGS := run.sh
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
index 88cfe88e466f..0eb7ab626e1c 100644
--- a/tools/testing/selftests/android/ion/Makefile
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
INCLUDEDIR := -I. -I../../../../../drivers/staging/android/uapi/ -I../../../../../usr/include/
CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
diff --git a/tools/testing/selftests/android/ion/ion.h b/tools/testing/selftests/android/ion/ion.h
index f7021ac51335..33db23018abf 100644
--- a/tools/testing/selftests/android/ion/ion.h
+++ b/tools/testing/selftests/android/ion/ion.h
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ion.h
*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
/* This file is copied from drivers/staging/android/uapi/ion.h
diff --git a/tools/testing/selftests/android/ion/ionapp_export.c b/tools/testing/selftests/android/ion/ionapp_export.c
index b5fa0a2dc968..063b7830d1bd 100644
--- a/tools/testing/selftests/android/ion/ionapp_export.c
+++ b/tools/testing/selftests/android/ion/ionapp_export.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ionapp_export.c
*
@@ -7,16 +8,6 @@
* So, this server has to be started first before the client.
*
* Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/android/ion/ionapp_import.c b/tools/testing/selftests/android/ion/ionapp_import.c
index ae2d704cfa46..54b580cb04f6 100644
--- a/tools/testing/selftests/android/ion/ionapp_import.c
+++ b/tools/testing/selftests/android/ion/ionapp_import.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* ionapp_import.c
*
@@ -6,16 +7,6 @@
* This acts like a client for ionapp_export.
*
* Copyright (C) 2017 Pintu Kumar <pintu.ping@gmail.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index dd093bd91aa9..dd5d69529382 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -14,6 +14,7 @@ feature
test_libbpf_open
test_sock
test_sock_addr
+test_sock_fields
urandom_read
test_btf
test_sockmap
@@ -29,3 +30,8 @@ test_netcnt
test_section_names
test_tcpnotify_user
test_libbpf
+test_tcp_check_syncookie_user
+test_sysctl
+alu32
+libbpf.pc
+libbpf.so.*
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 41ab7a3668b3..e36356e2377e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -10,36 +10,33 @@ ifneq ($(wildcard $(GENHDR)),)
GENFLAGS := -DHAVE_GENHDR
endif
+CLANG ?= clang
+LLC ?= llc
+LLVM_OBJCOPY ?= llvm-objcopy
+LLVM_READELF ?= llvm-readelf
+BTF_PAHOLE ?= pahole
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
-TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
-all: $(TEST_CUSTOM_PROGS)
-
-$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
- $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
-
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
- test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
- test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
- test_netcnt test_tcpnotify_user
-
-TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
- test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
- sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
- test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
- test_tcpnotify_kern.o \
- sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
- sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
- test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
- test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \
- test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
- get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
- test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
- test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
- xdp_dummy.o test_map_in_map.o
+ test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
+ test_cgroup_storage test_select_reuseport test_section_names \
+ test_netcnt test_tcpnotify_user test_sock_fields test_sysctl
+
+BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
+TEST_GEN_FILES = $(BPF_OBJ_FILES)
+
+# Also test sub-register code-gen if LLVM has eBPF v3 processor support, which
+# contains both ALU32 and JMP32 instructions.
+SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
+ $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \
+ $(LLC) -mattr=+alu32 -mcpu=v3 2>&1 | \
+ grep 'if w')
+ifneq ($(SUBREG_CODEGEN),)
+TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES))
+endif
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -53,7 +50,11 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_skb_cgroup_id.sh \
test_flow_dissector.sh \
- test_xdp_vlan.sh
+ test_xdp_vlan.sh \
+ test_lwt_ip_encap.sh \
+ test_tcp_check_syncookie.sh \
+ test_tc_tunnel.sh \
+ test_tc_edt.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -62,10 +63,20 @@ TEST_PROGS_EXTENDED := with_addr.sh \
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
- flow_dissector_load test_flow_dissector
+ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
+ test_lirc_mode2_user
include ../lib.mk
+# NOTE: $(OUTPUT) won't get default value if used before lib.mk
+TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
+all: $(TEST_CUSTOM_PROGS)
+
+$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
+ $(CC) -o $@ $< -Wl,--build-id
+
+$(OUTPUT)/test_maps: map_tests/*.c
+
BPFOBJ := $(OUTPUT)/libbpf.a
$(TEST_GEN_PROGS): $(BPFOBJ)
@@ -84,6 +95,8 @@ $(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_netcnt: cgroup_helpers.c
+$(OUTPUT)/test_sock_fields: cgroup_helpers.c
+$(OUTPUT)/test_sysctl: cgroup_helpers.c
.PHONY: force
@@ -93,11 +106,6 @@ force:
$(BPFOBJ): force
$(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/
-CLANG ?= clang
-LLC ?= llc
-LLVM_OBJCOPY ?= llvm-objcopy
-BTF_PAHOLE ?= pahole
-
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
# Let newer LLVM versions transparently probe the kernel for availability
@@ -127,12 +135,15 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
$(OUTPUT)/test_queue_map.o: test_queue_stack_map.h
$(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
+$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
+$(OUTPUT)/test_progs.o: flow_dissector_load.h
+
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
- readelf -S ./llvm_btf_verify.o | grep BTF; \
+ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
@@ -149,9 +160,43 @@ endif
endif
endif
+TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
+TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
+
+ifneq ($(SUBREG_CODEGEN),)
+ALU32_BUILD_DIR = $(OUTPUT)/alu32
+TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
+$(ALU32_BUILD_DIR):
+ mkdir -p $@
+
+$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
+ cp $< $@
+
+$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
+ $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/urandom_read
+ $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
+ -o $(ALU32_BUILD_DIR)/test_progs_32 \
+ test_progs.c trace_helpers.c prog_tests/*.c \
+ $(OUTPUT)/libbpf.a $(LDLIBS)
+
+$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
+$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
+
+$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/test_progs_32
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -target bpf -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
+ -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
+endif
+
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
-$(OUTPUT)/test_xdp.o: test_xdp.c
+$(OUTPUT)/test_xdp.o: progs/test_xdp.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -159,7 +204,7 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-$(OUTPUT)/%.o: %.c
+$(OUTPUT)/%.o: progs/%.c
$(CLANG) $(CLANG_FLAGS) \
-O2 -target bpf -emit-llvm -c $< -o - | \
$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
@@ -167,4 +212,67 @@ ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
-EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
+PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
+test_progs.c: $(PROG_TESTS_H)
+$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
+$(OUTPUT)/test_progs: prog_tests/*.c
+
+PROG_TESTS_DIR = $(OUTPUT)/prog_tests
+$(PROG_TESTS_DIR):
+ mkdir -p $@
+
+PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
+$(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
+ $(shell ( cd prog_tests/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef DECLARE'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
+ echo '#endif'; \
+ echo '#ifdef CALL'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
+ echo '#endif' \
+ ) > $(PROG_TESTS_H))
+
+TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
+MAP_TESTS_DIR = $(OUTPUT)/map_tests
+$(MAP_TESTS_DIR):
+ mkdir -p $@
+MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h
+test_maps.c: $(MAP_TESTS_H)
+$(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS)
+MAP_TESTS_FILES := $(wildcard map_tests/*.c)
+$(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
+ $(shell ( cd map_tests/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef DECLARE'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@extern void test_\1(void);@'; \
+ echo '#endif'; \
+ echo '#ifdef CALL'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\([^\.]*\)\.c@test_\1();@'; \
+ echo '#endif' \
+ ) > $(MAP_TESTS_H))
+
+VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
+test_verifier.c: $(VERIFIER_TESTS_H)
+$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
+
+VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
+$(VERIFIER_TESTS_DIR):
+ mkdir -p $@
+
+VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
+$(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
+ $(shell ( cd verifier/; \
+ echo '/* Generated header, do not edit */'; \
+ echo '#ifdef FILL_ARRAY'; \
+ ls *.c 2> /dev/null | \
+ sed -e 's@\(.*\)@#include \"\1\"@'; \
+ echo '#endif' \
+ ) > $(VERIFIER_TESTS_H))
+
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H)
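
The generated prog_tests/tests.h (and the analogous map_tests and verifier headers) carries its content twice, guarded by DECLARE and CALL, so a single runner source can include it once for the prototypes and once for the call sequence. A minimal sketch of that consumption pattern follows; the runner body and test names are illustrative only, not the actual test_progs.c source.

/*
 * Illustrative consumer of the generated header (hypothetical runner).
 * For each prog_tests/foo.c the generated tests.h contains:
 *
 *     #ifdef DECLARE
 *     extern void test_foo(void);
 *     #endif
 *     #ifdef CALL
 *     test_foo();
 *     #endif
 */
#define DECLARE
#include "prog_tests/tests.h"	/* pulls in the extern declarations */
#undef DECLARE

int main(void)
{
#define CALL
#include "prog_tests/tests.h"	/* expands into the sequence of test calls */
#undef CALL
	return 0;
}
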
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 6c77cf7bedce..5f6f9e7aba2a 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -9,14 +9,14 @@
#define SEC(NAME) __attribute__((section(NAME), used))
/* helper functions called from eBPF programs written in C */
-static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
(void *) BPF_FUNC_map_lookup_elem;
-static int (*bpf_map_update_elem)(void *map, void *key, void *value,
+static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_update_elem;
-static int (*bpf_map_delete_elem)(void *map, void *key) =
+static int (*bpf_map_delete_elem)(void *map, const void *key) =
(void *) BPF_FUNC_map_delete_elem;
-static int (*bpf_map_push_elem)(void *map, void *value,
+static int (*bpf_map_push_elem)(void *map, const void *value,
unsigned long long flags) =
(void *) BPF_FUNC_map_push_elem;
static int (*bpf_map_pop_elem)(void *map, void *value) =
@@ -159,6 +159,11 @@ static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_tcp;
+static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
+ struct bpf_sock_tuple *tuple,
+ int size, unsigned long long netns_id,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_skc_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
struct bpf_sock_tuple *tuple,
int size, unsigned long long netns_id,
@@ -172,6 +177,45 @@ static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
(void *) BPF_FUNC_rc_pointer_rel;
+static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_lock;
+static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
+ (void *) BPF_FUNC_spin_unlock;
+static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_sk_fullsock;
+static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_tcp_sock;
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+ (void *) BPF_FUNC_get_listener_sock;
+static int (*bpf_skb_ecn_set_ce)(void *ctx) =
+ (void *) BPF_FUNC_skb_ecn_set_ce;
+static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
+ void *ip, int ip_len, void *tcp, int tcp_len) =
+ (void *) BPF_FUNC_tcp_check_syncookie;
+static int (*bpf_sysctl_get_name)(void *ctx, char *buf,
+ unsigned long long buf_len,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_sysctl_get_name;
+static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf,
+ unsigned long long buf_len) =
+ (void *) BPF_FUNC_sysctl_get_current_value;
+static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf,
+ unsigned long long buf_len) =
+ (void *) BPF_FUNC_sysctl_get_new_value;
+static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf,
+ unsigned long long buf_len) =
+ (void *) BPF_FUNC_sysctl_set_new_value;
+static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
+ unsigned long long flags, long *res) =
+ (void *) BPF_FUNC_strtol;
+static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
+ unsigned long long flags, unsigned long *res) =
+ (void *) BPF_FUNC_strtoul;
+static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
+ void *value, __u64 flags) =
+ (void *) BPF_FUNC_sk_storage_get;
+static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
+ (void *)BPF_FUNC_sk_storage_delete;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -224,6 +268,36 @@ static int (*bpf_skb_change_head)(void *, int len, int flags) =
(void *) BPF_FUNC_skb_change_head;
static int (*bpf_skb_pull_data)(void *, int len) =
(void *) BPF_FUNC_skb_pull_data;
+static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
+ (void *) BPF_FUNC_get_cgroup_classid;
+static unsigned int (*bpf_get_route_realm)(void *ctx) =
+ (void *) BPF_FUNC_get_route_realm;
+static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_proto;
+static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
+ (void *) BPF_FUNC_skb_change_type;
+static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
+ (void *) BPF_FUNC_get_hash_recalc;
+static unsigned long long (*bpf_get_current_task)(void) =
+ (void *) BPF_FUNC_get_current_task;
+static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
+ (void *) BPF_FUNC_skb_change_tail;
+static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
+ (void *) BPF_FUNC_csum_update;
+static void (*bpf_set_hash_invalid)(void *ctx) =
+ (void *) BPF_FUNC_set_hash_invalid;
+static int (*bpf_get_numa_node_id)(void) =
+ (void *) BPF_FUNC_get_numa_node_id;
+static int (*bpf_probe_read_str)(void *ctx, __u32 size,
+ const void *unsafe_ptr) =
+ (void *) BPF_FUNC_probe_read_str;
+static unsigned int (*bpf_get_socket_uid)(void *ctx) =
+ (void *) BPF_FUNC_get_socket_uid;
+static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
+ (void *) BPF_FUNC_set_hash;
+static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
+ unsigned long long flags) =
+ (void *) BPF_FUNC_skb_adjust_room;
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
@@ -232,6 +306,9 @@ static int (*bpf_skb_pull_data)(void *, int len) =
#elif defined(__TARGET_ARCH_s930x)
#define bpf_target_s930x
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_arm)
+ #define bpf_target_arm
+ #define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
@@ -254,6 +331,8 @@ static int (*bpf_skb_pull_data)(void *, int len) =
#define bpf_target_x86
#elif defined(__s390x__)
#define bpf_target_s930x
+#elif defined(__arm__)
+ #define bpf_target_arm
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
@@ -291,6 +370,19 @@ static int (*bpf_skb_pull_data)(void *, int len) =
#define PT_REGS_SP(x) ((x)->gprs[15])
#define PT_REGS_IP(x) ((x)->psw.addr)
+#elif defined(bpf_target_arm)
+
+#define PT_REGS_PARM1(x) ((x)->uregs[0])
+#define PT_REGS_PARM2(x) ((x)->uregs[1])
+#define PT_REGS_PARM3(x) ((x)->uregs[2])
+#define PT_REGS_PARM4(x) ((x)->uregs[3])
+#define PT_REGS_PARM5(x) ((x)->uregs[4])
+#define PT_REGS_RET(x) ((x)->uregs[14])
+#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->uregs[0])
+#define PT_REGS_SP(x) ((x)->uregs[13])
+#define PT_REGS_IP(x) ((x)->uregs[12])
+
#elif defined(bpf_target_arm64)
#define PT_REGS_PARM1(x) ((x)->regs[0])
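
The new bpf_target_arm block gives ARM kprobe programs the same register accessors the other architectures already had. Below is a minimal sketch of how such accessors are typically used from a kprobe program; the probed symbol is a hypothetical example and not part of this patch.

#include <linux/ptrace.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Hypothetical probe: read the first argument of the traced function.
 * PT_REGS_PARM1() only yields the raw register value; the memory it
 * points to still has to be fetched with bpf_probe_read(). */
SEC("kprobe/tcp_v4_connect")
int trace_connect(struct pt_regs *ctx)
{
	unsigned long arg1 = PT_REGS_PARM1(ctx);
	char first_byte = 0;

	bpf_probe_read(&first_byte, sizeof(first_byte), (void *)arg1);
	return 0;
}

char _license[] SEC("license") = "GPL";
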
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 84fd6f1bf33e..a29206ebbd13 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -58,4 +58,13 @@ static inline unsigned int bpf_num_possible_cpus(void)
# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+#endif
+
#endif /* __BPF_UTIL__ */
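
sizeof_field() and offsetofend() mirror the kernel's own helpers: the size of a single struct member, and the byte offset just past that member. A small self-contained illustration (the struct is hypothetical):

#include <stddef.h>
#include <stdio.h>

#ifndef sizeof_field
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif
#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
#endif

struct flow_ports_example {
	unsigned int nhoff;	/* bytes 0..3 */
	unsigned short sport;	/* bytes 4..5 */
	unsigned short dport;	/* bytes 6..7 */
};

int main(void)
{
	/* prints "2 6": dport is 2 bytes wide, sport ends at offset 6 */
	printf("%zu %zu\n",
	       sizeof_field(struct flow_ports_example, dport),
	       offsetofend(struct flow_ports_example, sport));
	return 0;
}
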
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 37f947ec44ed..f7a0744db31e 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -23,3 +23,14 @@ CONFIG_LWTUNNEL=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_XDP_SOCKETS=y
CONFIG_FTRACE_SYSCALLS=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_GRE=y
+CONFIG_NET_FOU=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_IPV6_SIT=m
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..3fd83b9dc1bf 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
static void load_and_attach_program(void)
{
- struct bpf_program *prog, *main_prog;
- struct bpf_map *prog_array;
- int i, fd, prog_fd, ret;
+ int prog_fd, ret;
struct bpf_object *obj;
- int prog_array_fd;
- ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
- &prog_fd);
+ ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+ cfg_map_name, NULL, &prog_fd, NULL);
if (ret)
- error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
- main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
- if (!main_prog)
- error(1, 0, "bpf_object__find_program_by_title %s",
- cfg_section_name);
-
- prog_fd = bpf_program__fd(main_prog);
- if (prog_fd < 0)
- error(1, 0, "bpf_program__fd");
-
- prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
- if (!prog_array)
- error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
- prog_array_fd = bpf_map__fd(prog_array);
- if (prog_array_fd < 0)
- error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- fd = bpf_program__fd(prog);
- if (fd < 0)
- error(1, 0, "bpf_program__fd");
-
- if (fd != prog_fd) {
- printf("%d: %s\n", i, bpf_program__title(prog, false));
- bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
- ++i;
- }
- }
+ error(1, 0, "bpf_flow_load %s", cfg_path_name);
ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
ret = bpf_object__pin(obj, cfg_pin_path);
if (ret)
error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
}
static void detach_program(void)
@@ -85,7 +52,7 @@ static void detach_program(void)
sprintf(command, "rm -r %s", cfg_pin_path);
ret = system(command);
if (ret)
- error(1, errno, command);
+ error(1, errno, "%s", command);
}
static void parse_opts(int argc, char **argv)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..daeaeb518894
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+ const char *path,
+ const char *section_name,
+ const char *map_name,
+ const char *keys_map_name,
+ int *prog_fd,
+ int *keys_fd)
+{
+ struct bpf_program *prog, *main_prog;
+ struct bpf_map *prog_array, *keys;
+ int prog_array_fd;
+ int ret, fd, i;
+
+ ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+ prog_fd);
+ if (ret)
+ return ret;
+
+ main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ if (!main_prog)
+ return -1;
+
+ *prog_fd = bpf_program__fd(main_prog);
+ if (*prog_fd < 0)
+ return -1;
+
+ prog_array = bpf_object__find_map_by_name(*obj, map_name);
+ if (!prog_array)
+ return -1;
+
+ prog_array_fd = bpf_map__fd(prog_array);
+ if (prog_array_fd < 0)
+ return -1;
+
+ if (keys_map_name && keys_fd) {
+ keys = bpf_object__find_map_by_name(*obj, keys_map_name);
+ if (!keys)
+ return -1;
+
+ *keys_fd = bpf_map__fd(keys);
+ if (*keys_fd < 0)
+ return -1;
+ }
+
+ i = 0;
+ bpf_object__for_each_program(prog, *obj) {
+ fd = bpf_program__fd(prog);
+ if (fd < 0)
+ return fd;
+
+ if (fd != *prog_fd) {
+ bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+ ++i;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/map_tests/.gitignore b/tools/testing/selftests/bpf/map_tests/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
new file mode 100644
index 000000000000..e569edc679d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/compiler.h>
+#include <linux/err.h>
+
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <linux/btf.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_btf.h>
+#include <test_maps.h>
+
+static struct bpf_create_map_attr xattr = {
+ .name = "sk_storage_map",
+ .map_type = BPF_MAP_TYPE_SK_STORAGE,
+ .map_flags = BPF_F_NO_PREALLOC,
+ .max_entries = 0,
+ .key_size = 4,
+ .value_size = 8,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ .btf_fd = -1,
+};
+
+static unsigned int nr_sk_threads_done;
+static unsigned int nr_sk_threads_err;
+static unsigned int nr_sk_per_thread = 4096;
+static unsigned int nr_sk_threads = 4;
+static int sk_storage_map = -1;
+static unsigned int stop;
+static int runtime_s = 5;
+
+static bool is_stopped(void)
+{
+ return READ_ONCE(stop);
+}
+
+static unsigned int threads_err(void)
+{
+ return READ_ONCE(nr_sk_threads_err);
+}
+
+static void notify_thread_err(void)
+{
+ __sync_add_and_fetch(&nr_sk_threads_err, 1);
+}
+
+static bool wait_for_threads_err(void)
+{
+ while (!is_stopped() && !threads_err())
+ usleep(500);
+
+ return !is_stopped();
+}
+
+static unsigned int threads_done(void)
+{
+ return READ_ONCE(nr_sk_threads_done);
+}
+
+static void notify_thread_done(void)
+{
+ __sync_add_and_fetch(&nr_sk_threads_done, 1);
+}
+
+static void notify_thread_redo(void)
+{
+ __sync_sub_and_fetch(&nr_sk_threads_done, 1);
+}
+
+static bool wait_for_threads_done(void)
+{
+ while (threads_done() != nr_sk_threads && !is_stopped() &&
+ !threads_err())
+ usleep(50);
+
+ return !is_stopped() && !threads_err();
+}
+
+static bool wait_for_threads_redo(void)
+{
+ while (threads_done() && !is_stopped() && !threads_err())
+ usleep(50);
+
+ return !is_stopped() && !threads_err();
+}
+
+static bool wait_for_map(void)
+{
+ while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
+ usleep(50);
+
+ return !is_stopped();
+}
+
+static bool wait_for_map_close(void)
+{
+ while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
+ ;
+
+ return !is_stopped();
+}
+
+static int load_btf(void)
+{
+ const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
+ __u32 btf_raw_types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+ };
+ struct btf_header btf_hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(btf_raw_types),
+ .str_off = sizeof(btf_raw_types),
+ .str_len = sizeof(btf_str_sec),
+ };
+ __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
+ sizeof(btf_str_sec)];
+
+ memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
+ memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
+ memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
+ btf_str_sec, sizeof(btf_str_sec));
+
+ return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
+}
+
+static int create_sk_storage_map(void)
+{
+ int btf_fd, map_fd;
+
+ btf_fd = load_btf();
+ CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
+ btf_fd, errno);
+ xattr.btf_fd = btf_fd;
+
+ map_fd = bpf_create_map_xattr(&xattr);
+ xattr.btf_fd = -1;
+ close(btf_fd);
+ CHECK(map_fd == -1,
+ "bpf_create_map_xattr()", "errno:%d\n", errno);
+
+ return map_fd;
+}
+
+static void *insert_close_thread(void *arg)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9F, .lock = 0, };
+ int i, map_fd, err, *sk_fds;
+
+ sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
+ if (!sk_fds) {
+ notify_thread_err();
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nr_sk_per_thread; i++)
+ sk_fds[i] = -1;
+
+ while (!is_stopped()) {
+ if (!wait_for_map())
+ goto close_all;
+
+ map_fd = READ_ONCE(sk_storage_map);
+ for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
+ sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sk_fds[i] == -1) {
+ err = -errno;
+ fprintf(stderr, "socket(): errno:%d\n", errno);
+ goto errout;
+ }
+ err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
+ BPF_NOEXIST);
+ if (err) {
+ err = -errno;
+ fprintf(stderr,
+ "bpf_map_update_elem(): errno:%d\n",
+ errno);
+ goto errout;
+ }
+ }
+
+ notify_thread_done();
+ wait_for_map_close();
+
+close_all:
+ for (i = 0; i < nr_sk_per_thread; i++) {
+ close(sk_fds[i]);
+ sk_fds[i] = -1;
+ }
+
+ notify_thread_redo();
+ }
+
+ free(sk_fds);
+ return NULL;
+
+errout:
+ for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
+ close(sk_fds[i]);
+ free(sk_fds);
+ notify_thread_err();
+ return ERR_PTR(err);
+}
+
+static int do_sk_storage_map_stress_free(void)
+{
+ int i, map_fd = -1, err = 0, nr_threads_created = 0;
+ pthread_t *sk_thread_ids;
+ void *thread_ret;
+
+ sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
+ if (!sk_thread_ids) {
+ fprintf(stderr, "malloc(sk_threads): NULL\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_sk_threads; i++) {
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ insert_close_thread, NULL);
+ if (err) {
+ err = -errno;
+ goto done;
+ }
+ nr_threads_created++;
+ }
+
+ while (!is_stopped()) {
+ map_fd = create_sk_storage_map();
+ WRITE_ONCE(sk_storage_map, map_fd);
+
+ if (!wait_for_threads_done())
+ break;
+
+ WRITE_ONCE(sk_storage_map, -1);
+ close(map_fd);
+ map_fd = -1;
+
+ if (!wait_for_threads_redo())
+ break;
+ }
+
+done:
+ WRITE_ONCE(stop, 1);
+ for (i = 0; i < nr_threads_created; i++) {
+ pthread_join(sk_thread_ids[i], &thread_ret);
+ if (IS_ERR(thread_ret) && !err) {
+ err = PTR_ERR(thread_ret);
+ fprintf(stderr, "threads#%u: err:%d\n", i, err);
+ }
+ }
+ free(sk_thread_ids);
+
+ if (map_fd != -1)
+ close(map_fd);
+
+ return err;
+}
+
+static void *update_thread(void *arg)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9F, .lock = 0, };
+ int map_fd = READ_ONCE(sk_storage_map);
+ int sk_fd = *(int *)arg;
+ int err = 0; /* Suppress compiler false alarm */
+
+ while (!is_stopped()) {
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (err && errno != EAGAIN) {
+ err = -errno;
+ fprintf(stderr, "bpf_map_update_elem: %d %d\n",
+ err, errno);
+ break;
+ }
+ }
+
+ if (!is_stopped()) {
+ notify_thread_err();
+ return ERR_PTR(err);
+ }
+
+ return NULL;
+}
+
+static void *delete_thread(void *arg)
+{
+ int map_fd = READ_ONCE(sk_storage_map);
+ int sk_fd = *(int *)arg;
+ int err = 0; /* Suppress compiler false alarm */
+
+ while (!is_stopped()) {
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ if (err && errno != ENOENT) {
+ err = -errno;
+ fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
+ err, errno);
+ break;
+ }
+ }
+
+ if (!is_stopped()) {
+ notify_thread_err();
+ return ERR_PTR(err);
+ }
+
+ return NULL;
+}
+
+static int do_sk_storage_map_stress_change(void)
+{
+ int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
+ pthread_t *sk_thread_ids;
+ void *thread_ret;
+
+ sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
+ if (!sk_thread_ids) {
+ fprintf(stderr, "malloc(sk_threads): NULL\n");
+ return -ENOMEM;
+ }
+
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (sk_fd == -1) {
+ err = -errno;
+ goto done;
+ }
+
+ map_fd = create_sk_storage_map();
+ WRITE_ONCE(sk_storage_map, map_fd);
+
+ for (i = 0; i < nr_sk_threads; i++) {
+ if (i & 0x1)
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ update_thread, &sk_fd);
+ else
+ err = pthread_create(&sk_thread_ids[i], NULL,
+ delete_thread, &sk_fd);
+ if (err) {
+ err = -errno;
+ goto done;
+ }
+ nr_threads_created++;
+ }
+
+ wait_for_threads_err();
+
+done:
+ WRITE_ONCE(stop, 1);
+ for (i = 0; i < nr_threads_created; i++) {
+ pthread_join(sk_thread_ids[i], &thread_ret);
+ if (IS_ERR(thread_ret) && !err) {
+ err = PTR_ERR(thread_ret);
+ fprintf(stderr, "threads#%u: err:%d\n", i, err);
+ }
+ }
+ free(sk_thread_ids);
+
+ if (sk_fd != -1)
+ close(sk_fd);
+ close(map_fd);
+
+ return err;
+}
+
+static void stop_handler(int signum)
+{
+ if (signum != SIGALRM)
+ printf("stopping...\n");
+ WRITE_ONCE(stop, 1);
+}
+
+#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
+#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
+#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
+#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"
+
+static void test_sk_storage_map_stress_free(void)
+{
+ struct rlimit rlim_old, rlim_new = {};
+ int err;
+
+ getrlimit(RLIMIT_NOFILE, &rlim_old);
+
+ signal(SIGTERM, stop_handler);
+ signal(SIGINT, stop_handler);
+ if (runtime_s > 0) {
+ signal(SIGALRM, stop_handler);
+ alarm(runtime_s);
+ }
+
+ if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
+ rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
+ rlim_new.rlim_max = rlim_new.rlim_cur + 128;
+ err = setrlimit(RLIMIT_NOFILE, &rlim_new);
+ CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
+ rlim_new.rlim_cur, errno);
+ }
+
+ err = do_sk_storage_map_stress_free();
+
+ signal(SIGTERM, SIG_DFL);
+ signal(SIGINT, SIG_DFL);
+ if (runtime_s > 0) {
+ signal(SIGALRM, SIG_DFL);
+ alarm(0);
+ }
+
+ if (rlim_new.rlim_cur)
+ setrlimit(RLIMIT_NOFILE, &rlim_old);
+
+ CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
+}
+
+static void test_sk_storage_map_stress_change(void)
+{
+ int err;
+
+ signal(SIGTERM, stop_handler);
+ signal(SIGINT, stop_handler);
+ if (runtime_s > 0) {
+ signal(SIGALRM, stop_handler);
+ alarm(runtime_s);
+ }
+
+ err = do_sk_storage_map_stress_change();
+
+ signal(SIGTERM, SIG_DFL);
+ signal(SIGINT, SIG_DFL);
+ if (runtime_s > 0) {
+ signal(SIGALRM, SIG_DFL);
+ alarm(0);
+ }
+
+ CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
+}
+
+static void test_sk_storage_map_basic(void)
+{
+ struct {
+ int cnt;
+ int lock;
+ } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
+ struct bpf_create_map_attr bad_xattr;
+ int btf_fd, map_fd, sk_fd, err;
+
+ btf_fd = load_btf();
+ CHECK(btf_fd == -1, "bpf_load_btf", "btf_fd:%d errno:%d\n",
+ btf_fd, errno);
+ xattr.btf_fd = btf_fd;
+
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
+ sk_fd, errno);
+
+ map_fd = bpf_create_map_xattr(&xattr);
+ CHECK(map_fd == -1, "bpf_create_map_xattr(good_xattr)",
+ "map_fd:%d errno:%d\n", map_fd, errno);
+
+ /* Add new elem */
+ memcpy(&lookup_value, &value, sizeof(value));
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_NOEXIST | BPF_F_LOCK);
+ CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_EXIST | BPF_F_LOCK);
+ CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt and update with BPF_EXIST */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
+ CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Update with BPF_NOEXIST */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value,
+ BPF_NOEXIST | BPF_F_LOCK);
+ CHECK(!err || errno != EEXIST,
+ "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
+ CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
+ "err:%d errno:%d\n", err, errno);
+ value.cnt -= 1;
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Bump the cnt again and update with map_flags == 0 */
+ value.cnt += 1;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
+ err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(err || lookup_value.cnt != value.cnt,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d cnt:%x(%x)\n",
+ err, errno, lookup_value.cnt, value.cnt);
+
+ /* Test delete elem */
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
+ err, errno);
+ err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
+ BPF_F_LOCK);
+ CHECK(!err || errno != ENOENT,
+ "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
+ "err:%d errno:%d\n", err, errno);
+ err = bpf_map_delete_elem(map_fd, &sk_fd);
+ CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.btf_key_type_id = 0;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.btf_key_type_id = 3;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.max_entries = 1;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&bad_xattr, &xattr, sizeof(xattr));
+ bad_xattr.map_flags = 0;
+ err = bpf_create_map_xattr(&bad_xattr);
+ CHECK(!err || errno != EINVAL, "bap_create_map_xattr(bad_xattr)",
+ "err:%d errno:%d\n", err, errno);
+
+ xattr.btf_fd = -1;
+ close(btf_fd);
+ close(map_fd);
+ close(sk_fd);
+}
+
+void test_sk_storage_map(void)
+{
+ const char *test_name, *env_opt;
+ bool test_ran = false;
+
+ test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
+ if (env_opt)
+ nr_sk_threads = atoi(env_opt);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
+ if (env_opt)
+ nr_sk_per_thread = atoi(env_opt);
+
+ env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
+ if (env_opt)
+ runtime_s = atoi(env_opt);
+
+ if (!test_name || !strcmp(test_name, "basic")) {
+ test_sk_storage_map_basic();
+ test_ran = true;
+ }
+ if (!test_name || !strcmp(test_name, "stress_free")) {
+ test_sk_storage_map_stress_free();
+ test_ran = true;
+ }
+ if (!test_name || !strcmp(test_name, "stress_change")) {
+ test_sk_storage_map_stress_change();
+ test_ran = true;
+ }
+
+ if (test_ran)
+ printf("%s:PASS\n", __func__);
+ else
+ CHECK(1, "Invalid test_name", "%s\n", test_name);
+}
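
The raw BTF blob built by load_btf() is easiest to read against its string section: every name field in the type and member encodings is an offset into btf_str_sec. A worked breakdown of those offsets, derived directly from the literal above:

/* btf_str_sec = "\0bpf_spin_lock\0val\0cnt\0l"
 *
 *   offset  1: "bpf_spin_lock" -> name of type [2] (BTF_TYPE_ENC(1, ...))
 *   offset 15: "val"           -> name of type [3], and of the int member in [2]
 *   offset 19: "cnt"           -> first member of struct val, bit offset 0
 *   offset 23: "l"             -> second member, type [2], bit offset 32
 *
 * Type [3] therefore describes the same 8-byte layout the tests mirror in
 * userspace: struct val { int cnt; struct bpf_spin_lock l; };
 */
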
diff --git a/tools/testing/selftests/bpf/prog_tests/.gitignore b/tools/testing/selftests/bpf/prog_tests/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
new file mode 100644
index 000000000000..cb827383db4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_bpf_obj_id(void)
+{
+ const __u64 array_magic_value = 0xfaceb00c;
+ const __u32 array_key = 0;
+ const int nr_iters = 2;
+ const char *file = "./test_obj_id.o";
+ const char *expected_prog_name = "test_obj_id";
+ const char *expected_map_name = "test_map_id";
+ const __u64 nsec_per_sec = 1000000000;
+
+ struct bpf_object *objs[nr_iters];
+ int prog_fds[nr_iters], map_fds[nr_iters];
+ /* +1 to test for the info_len returned by kernel */
+ struct bpf_prog_info prog_infos[nr_iters + 1];
+ struct bpf_map_info map_infos[nr_iters + 1];
+ /* Each prog only uses one map. +1 to test nr_map_ids
+ * returned by kernel.
+ */
+ __u32 map_ids[nr_iters + 1];
+ char jited_insns[128], xlated_insns[128], zeros[128];
+ __u32 i, next_id, info_len, nr_id_found, duration = 0;
+ struct timespec real_time_ts, boot_time_ts;
+ int err = 0;
+ __u64 array_value;
+ uid_t my_uid = getuid();
+ time_t now, load_time;
+
+ err = bpf_prog_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
+
+ err = bpf_map_get_fd_by_id(0);
+ CHECK(err >= 0 || errno != ENOENT,
+ "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
+
+ for (i = 0; i < nr_iters; i++)
+ objs[i] = NULL;
+
+ /* Check bpf_obj_get_info_by_fd() */
+ bzero(zeros, sizeof(zeros));
+ for (i = 0; i < nr_iters; i++) {
+ now = time(NULL);
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
+ &objs[i], &prog_fds[i]);
+ /* test_obj_id.o is a dumb prog. It should never fail
+ * to load.
+ */
+ if (err)
+ error_cnt++;
+ assert(!err);
+
+ /* Insert a magic value to the map */
+ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
+ assert(map_fds[i] >= 0);
+ err = bpf_map_update_elem(map_fds[i], &array_key,
+ &array_magic_value, 0);
+ assert(!err);
+
+ /* Check getting map info */
+ info_len = sizeof(struct bpf_map_info) * 2;
+ bzero(&map_infos[i], info_len);
+ err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
+ &info_len);
+ if (CHECK(err ||
+ map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
+ map_infos[i].key_size != sizeof(__u32) ||
+ map_infos[i].value_size != sizeof(__u64) ||
+ map_infos[i].max_entries != 1 ||
+ map_infos[i].map_flags != 0 ||
+ info_len != sizeof(struct bpf_map_info) ||
+ strcmp((char *)map_infos[i].name, expected_map_name),
+ "get-map-info(fd)",
+ "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+ err, errno,
+ map_infos[i].type, BPF_MAP_TYPE_ARRAY,
+ info_len, sizeof(struct bpf_map_info),
+ map_infos[i].key_size,
+ map_infos[i].value_size,
+ map_infos[i].max_entries,
+ map_infos[i].map_flags,
+ map_infos[i].name, expected_map_name))
+ goto done;
+
+ /* Check getting prog info */
+ info_len = sizeof(struct bpf_prog_info) * 2;
+ bzero(&prog_infos[i], info_len);
+ bzero(jited_insns, sizeof(jited_insns));
+ bzero(xlated_insns, sizeof(xlated_insns));
+ prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
+ prog_infos[i].jited_prog_len = sizeof(jited_insns);
+ prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
+ prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
+ prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
+ prog_infos[i].nr_map_ids = 2;
+ err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
+ assert(!err);
+ err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
+ assert(!err);
+ err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
+ &info_len);
+ load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
+ + (prog_infos[i].load_time / nsec_per_sec);
+ if (CHECK(err ||
+ prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
+ info_len != sizeof(struct bpf_prog_info) ||
+ (jit_enabled && !prog_infos[i].jited_prog_len) ||
+ (jit_enabled &&
+ !memcmp(jited_insns, zeros, sizeof(zeros))) ||
+ !prog_infos[i].xlated_prog_len ||
+ !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
+ load_time < now - 60 || load_time > now + 60 ||
+ prog_infos[i].created_by_uid != my_uid ||
+ prog_infos[i].nr_map_ids != 1 ||
+ *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
+ strcmp((char *)prog_infos[i].name, expected_prog_name),
+ "get-prog-info(fd)",
+ "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+ err, errno, i,
+ prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
+ info_len, sizeof(struct bpf_prog_info),
+ jit_enabled,
+ prog_infos[i].jited_prog_len,
+ prog_infos[i].xlated_prog_len,
+ !!memcmp(jited_insns, zeros, sizeof(zeros)),
+ !!memcmp(xlated_insns, zeros, sizeof(zeros)),
+ load_time, now,
+ prog_infos[i].created_by_uid, my_uid,
+ prog_infos[i].nr_map_ids, 1,
+ *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
+ prog_infos[i].name, expected_prog_name))
+ goto done;
+ }
+
+ /* Check bpf_prog_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_prog_get_next_id(next_id, &next_id)) {
+ struct bpf_prog_info prog_info = {};
+ __u32 saved_map_id;
+ int prog_fd;
+
+ info_len = sizeof(prog_info);
+
+ prog_fd = bpf_prog_get_fd_by_id(next_id);
+ if (prog_fd < 0 && errno == ENOENT)
+ /* The bpf_prog is in the dead row */
+ continue;
+ if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
+ "prog_fd %d next_id %d errno %d\n",
+ prog_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (prog_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ /* Negative test:
+ * prog_info.nr_map_ids = 1
+ * prog_info.map_ids = NULL
+ */
+ prog_info.nr_map_ids = 1;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (CHECK(!err || errno != EFAULT,
+ "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
+ err, errno, EFAULT))
+ break;
+ bzero(&prog_info, sizeof(prog_info));
+ info_len = sizeof(prog_info);
+
+ saved_map_id = *(int *)((long)prog_infos[i].map_ids);
+ prog_info.map_ids = prog_infos[i].map_ids;
+ prog_info.nr_map_ids = 2;
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ prog_infos[i].jited_prog_insns = 0;
+ prog_infos[i].xlated_prog_insns = 0;
+ CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
+ memcmp(&prog_info, &prog_infos[i], info_len) ||
+ *(int *)(long)prog_info.map_ids != saved_map_id,
+ "get-prog-info(next_id->fd)",
+ "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
+ err, errno, info_len, sizeof(struct bpf_prog_info),
+ memcmp(&prog_info, &prog_infos[i], info_len),
+ *(int *)(long)prog_info.map_ids, saved_map_id);
+ close(prog_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total prog id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+ /* Check bpf_map_get_next_id() */
+ nr_id_found = 0;
+ next_id = 0;
+ while (!bpf_map_get_next_id(next_id, &next_id)) {
+ struct bpf_map_info map_info = {};
+ int map_fd;
+
+ info_len = sizeof(map_info);
+
+ map_fd = bpf_map_get_fd_by_id(next_id);
+ if (map_fd < 0 && errno == ENOENT)
+ /* The bpf_map is in the dead row */
+ continue;
+ if (CHECK(map_fd < 0, "get-map-fd(next_id)",
+ "map_fd %d next_id %u errno %d\n",
+ map_fd, next_id, errno))
+ break;
+
+ for (i = 0; i < nr_iters; i++)
+ if (map_infos[i].id == next_id)
+ break;
+
+ if (i == nr_iters)
+ continue;
+
+ nr_id_found++;
+
+ err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
+ assert(!err);
+
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(err || info_len != sizeof(struct bpf_map_info) ||
+ memcmp(&map_info, &map_infos[i], info_len) ||
+ array_value != array_magic_value,
+ "check get-map-info(next_id->fd)",
+ "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
+ err, errno, info_len, sizeof(struct bpf_map_info),
+ memcmp(&map_info, &map_infos[i], info_len),
+ array_value, array_magic_value);
+
+ close(map_fd);
+ }
+ CHECK(nr_id_found != nr_iters,
+ "check total map id found by get_next_id",
+ "nr_id_found %u(%u)\n",
+ nr_id_found, nr_iters);
+
+done:
+ for (i = 0; i < nr_iters; i++)
+ bpf_object__close(objs[i]);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
new file mode 100644
index 000000000000..b74e2f6e96d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <test_progs.h>
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level != LIBBPF_DEBUG)
+ return 0;
+
+ if (!strstr(format, "verifier log"))
+ return 0;
+ return vfprintf(stderr, "%s", args);
+}
+
+static int check_load(const char *file)
+{
+ struct bpf_prog_load_attr attr;
+ struct bpf_object *obj = NULL;
+ int err, prog_fd;
+
+ memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+ attr.file = file;
+ attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+ attr.log_level = 4;
+ err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+ bpf_object__close(obj);
+ if (err)
+ error_cnt++;
+ return err;
+}
+
+void test_bpf_verif_scale(void)
+{
+ const char *file1 = "./test_verif_scale1.o";
+ const char *file2 = "./test_verif_scale2.o";
+ const char *file3 = "./test_verif_scale3.o";
+ int err;
+
+ if (verifier_stats)
+ libbpf_set_print(libbpf_debug_print);
+
+ err = check_load(file1);
+ err |= check_load(file2);
+ err |= check_load(file3);
+ if (!err)
+ printf("test_verif_scale:OK\n");
+ else
+ printf("test_verif_scale:FAIL\n");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
new file mode 100644
index 000000000000..c938283ac232
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <error.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <sys/uio.h>
+
+#define CHECK_FLOW_KEYS(desc, got, expected) \
+ CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
+ desc, \
+ "nhoff=%u/%u " \
+ "thoff=%u/%u " \
+ "addr_proto=0x%x/0x%x " \
+ "is_frag=%u/%u " \
+ "is_first_frag=%u/%u " \
+ "is_encap=%u/%u " \
+ "ip_proto=0x%x/0x%x " \
+ "n_proto=0x%x/0x%x " \
+ "sport=%u/%u " \
+ "dport=%u/%u\n", \
+ got.nhoff, expected.nhoff, \
+ got.thoff, expected.thoff, \
+ got.addr_proto, expected.addr_proto, \
+ got.is_frag, expected.is_frag, \
+ got.is_first_frag, expected.is_first_frag, \
+ got.is_encap, expected.is_encap, \
+ got.ip_proto, expected.ip_proto, \
+ got.n_proto, expected.n_proto, \
+ got.sport, expected.sport, \
+ got.dport, expected.dport)
+
+struct ipv4_pkt {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed;
+
+struct svlan_ipv4_pkt {
+ struct ethhdr eth;
+ __u16 vlan_tci;
+ __u16 vlan_proto;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed;
+
+struct ipv6_pkt {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed;
+
+struct dvlan_ipv6_pkt {
+ struct ethhdr eth;
+ __u16 vlan_tci;
+ __u16 vlan_proto;
+ __u16 vlan_tci2;
+ __u16 vlan_proto2;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed;
+
+struct test {
+ const char *name;
+ union {
+ struct ipv4_pkt ipv4;
+ struct svlan_ipv4_pkt svlan_ipv4;
+ struct ipv6_pkt ipv6;
+ struct dvlan_ipv6_pkt dvlan_ipv6;
+ } pkt;
+ struct bpf_flow_keys keys;
+};
+
+#define VLAN_HLEN 4
+
+struct test tests[] = {
+ {
+ .name = "ipv4",
+ .pkt.ipv4 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_TCP,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.doff = 5,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ },
+ },
+ {
+ .name = "ipv6",
+ .pkt.ipv6 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .iph.nexthdr = IPPROTO_TCP,
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.doff = 5,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN,
+ .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+ },
+ },
+ {
+ .name = "802.1q-ipv4",
+ .pkt.svlan_ipv4 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
+ .vlan_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = IPPROTO_TCP,
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.doff = 5,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN + VLAN_HLEN,
+ .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
+ .addr_proto = ETH_P_IP,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IP),
+ },
+ },
+ {
+ .name = "802.1ad-ipv6",
+ .pkt.dvlan_ipv6 = {
+ .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
+ .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
+ .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
+ .iph.nexthdr = IPPROTO_TCP,
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.doff = 5,
+ },
+ .keys = {
+ .nhoff = ETH_HLEN + VLAN_HLEN * 2,
+ .thoff = ETH_HLEN + VLAN_HLEN * 2 +
+ sizeof(struct ipv6hdr),
+ .addr_proto = ETH_P_IPV6,
+ .ip_proto = IPPROTO_TCP,
+ .n_proto = __bpf_constant_htons(ETH_P_IPV6),
+ },
+ },
+};
+
+static int create_tap(const char *ifname)
+{
+ struct ifreq ifr = {
+ .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
+ };
+ int fd, ret;
+
+ strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd < 0)
+ return -1;
+
+ ret = ioctl(fd, TUNSETIFF, &ifr);
+ if (ret)
+ return -1;
+
+ return fd;
+}
+
+static int tx_tap(int fd, void *pkt, size_t len)
+{
+ struct iovec iov[] = {
+ {
+ .iov_len = len,
+ .iov_base = pkt,
+ },
+ };
+ return writev(fd, iov, ARRAY_SIZE(iov));
+}
+
+static int ifup(const char *ifname)
+{
+ struct ifreq ifr = {};
+ int sk, ret;
+
+ strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+ sk = socket(PF_INET, SOCK_DGRAM, 0);
+ if (sk < 0)
+ return -1;
+
+ ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ close(sk);
+ return -1;
+ }
+
+ ifr.ifr_flags |= IFF_UP;
+ ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
+ if (ret) {
+ close(sk);
+ return -1;
+ }
+
+ close(sk);
+ return 0;
+}
+
+void test_flow_dissector(void)
+{
+ int i, err, prog_fd, keys_fd = -1, tap_fd;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+
+ err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+ "jmp_table", "last_dissection", &prog_fd, &keys_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_flow_keys flow_keys;
+ struct bpf_prog_test_run_attr tattr = {
+ .prog_fd = prog_fd,
+ .data_in = &tests[i].pkt,
+ .data_size_in = sizeof(tests[i].pkt),
+ .data_out = &flow_keys,
+ };
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
+ err || tattr.retval != 1,
+ tests[i].name,
+ "err %d errno %d retval %d duration %d size %u/%lu\n",
+ err, errno, tattr.retval, tattr.duration,
+ tattr.data_size_out, sizeof(flow_keys));
+ CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ }
+
+ /* Do the same tests but for skb-less flow dissector.
+ * We use a known path in the net/tun driver that calls
+ * eth_get_headlen and we manually export bpf_flow_keys
+ * via BPF map in this case.
+ */
+
+ err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+ CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);
+
+ tap_fd = create_tap("tap0");
+ CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
+ err = ifup("tap0");
+ CHECK(err, "ifup", "err %d errno %d\n", err, errno);
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_flow_keys flow_keys = {};
+ struct bpf_prog_test_run_attr tattr = {};
+ __u32 key = 0;
+
+ err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
+ CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
+
+ err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
+ CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+
+ CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
+ CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+ }
+
+ bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
new file mode 100644
index 000000000000..dc5ef155ec28
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_flow_dissector_load_bytes(void)
+{
+ struct bpf_flow_keys flow_keys;
+ __u32 duration = 0, retval, size;
+ struct bpf_insn prog[] = {
+ // BPF_REG_1 - 1st argument: context
+ // BPF_REG_2 - 2nd argument: offset, start at first byte
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ // BPF_REG_3 - 3rd argument: destination, reserve byte on stack
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -1),
+ // BPF_REG_4 - 4th argument: copy one byte
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ // bpf_skb_load_bytes(ctx, 0, ptr, 1)
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_load_bytes),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ // if (ret == 0) return BPF_DROP (2)
+ BPF_MOV64_IMM(BPF_REG_0, BPF_DROP),
+ BPF_EXIT_INSN(),
+ // if (ret != 0) return BPF_OK (0)
+ BPF_MOV64_IMM(BPF_REG_0, BPF_OK),
+ BPF_EXIT_INSN(),
+ };
+ int fd, err;
+
+ /* make sure bpf_skb_load_bytes is not allowed from skb-less context
+ */
+ fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+ ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+ CHECK(fd < 0,
+ "flow_dissector-bpf_skb_load_bytes-load",
+ "fd %d errno %d\n",
+ fd, errno);
+
+ err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4),
+ &flow_keys, &size, &retval, &duration);
+ CHECK(size != sizeof(flow_keys) || err || retval != 1,
+ "flow_dissector-bpf_skb_load_bytes",
+ "err %d errno %d retval %d duration %d size %u/%zu\n",
+ err, errno, retval, duration, size, sizeof(flow_keys));
+
+ if (fd >= 0)
+ close(fd);
+}
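
For readability, the hand-assembled instruction sequence above corresponds roughly to the following restricted-C flow dissector program; this is only an illustrative translation, not code from the patch.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("flow_dissector")
int load_one_byte(struct __sk_buff *skb)
{
	char byte;

	/* copy one byte from packet offset 0 into stack storage */
	if (bpf_skb_load_bytes(skb, 0, &byte, sizeof(byte)))
		return BPF_OK;	/* helper returned non-zero */
	return BPF_DROP;	/* helper returned 0 */
}

char _license[] SEC("license") = "GPL";
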
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
new file mode 100644
index 000000000000..c2a0a9d5591b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define MAX_CNT_RAWTP 10ull
+#define MAX_STACK_RAWTP 100
+struct get_stack_trace_t {
+ int pid;
+ int kern_stack_size;
+ int user_stack_size;
+ int user_stack_buildid_size;
+ __u64 kern_stack[MAX_STACK_RAWTP];
+ __u64 user_stack[MAX_STACK_RAWTP];
+ struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
+};
+
+static int get_stack_print_output(void *data, int size)
+{
+ bool good_kern_stack = false, good_user_stack = false;
+ const char *nonjit_func = "___bpf_prog_run";
+ struct get_stack_trace_t *e = data;
+ int i, num_stack;
+ static __u64 cnt;
+ struct ksym *ks;
+
+ cnt++;
+
+ if (size < sizeof(struct get_stack_trace_t)) {
+ __u64 *raw_data = data;
+ bool found = false;
+
+ num_stack = size / sizeof(__u64);
+ /* If jit is enabled, we do not have a good way to
+ * verify the sanity of the kernel stack. So we
+ * just assume it is good if the stack is not empty.
+ * This could be improved in the future.
+ */
+ if (jit_enabled) {
+ found = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(raw_data[i]);
+ if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ good_kern_stack = true;
+ good_user_stack = true;
+ }
+ } else {
+ num_stack = e->kern_stack_size / sizeof(__u64);
+ if (jit_enabled) {
+ good_kern_stack = num_stack > 0;
+ } else {
+ for (i = 0; i < num_stack; i++) {
+ ks = ksym_search(e->kern_stack[i]);
+ if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
+ good_kern_stack = true;
+ break;
+ }
+ }
+ }
+ if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+ good_user_stack = true;
+ }
+ if (!good_kern_stack || !good_user_stack)
+ return LIBBPF_PERF_EVENT_ERROR;
+
+ if (cnt == MAX_CNT_RAWTP)
+ return LIBBPF_PERF_EVENT_DONE;
+
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+void test_get_stack_raw_tp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
+ struct perf_event_attr attr = {};
+ struct timespec tv = {0, 10};
+ __u32 key = 0, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
+ if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+ perfmap_fd, errno))
+ goto close_prog;
+
+ err = load_kallsyms();
+ if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
+ -1/*group_fd*/, 0);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
+ errno))
+ goto close_prog;
+
+ err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
+ if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+
+ err = perf_event_mmap(pmu_fd);
+ if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ /* trigger some syscall action */
+ for (i = 0; i < MAX_CNT_RAWTP; i++)
+ nanosleep(&tv, NULL);
+
+ err = perf_event_poller(pmu_fd, get_stack_print_output);
+ if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c
new file mode 100644
index 000000000000..d011079fb0bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/global_data.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_global_data_number(struct bpf_object *obj, __u32 duration)
+{
+ int i, err, map_fd;
+ uint64_t num;
+
+ map_fd = bpf_find_map(__func__, obj, "result_number");
+ if (map_fd < 0) {
+ error_cnt++;
+ return;
+ }
+
+ struct {
+ char *name;
+ uint32_t key;
+ uint64_t num;
+ } tests[] = {
+ { "relocate .bss reference", 0, 0 },
+ { "relocate .data reference", 1, 42 },
+ { "relocate .rodata reference", 2, 24 },
+ { "relocate .bss reference", 3, 0 },
+ { "relocate .data reference", 4, 0xffeeff },
+ { "relocate .rodata reference", 5, 0xabab },
+ { "relocate .bss reference", 6, 1234 },
+ { "relocate .bss reference", 7, 0 },
+ { "relocate .rodata reference", 8, 0xab },
+ { "relocate .rodata reference", 9, 0x1111111111111111 },
+ { "relocate .rodata reference", 10, ~0 },
+ };
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);
+ CHECK(err || num != tests[i].num, tests[i].name,
+ "err %d result %lx expected %lx\n",
+ err, num, tests[i].num);
+ }
+}
+
+static void test_global_data_string(struct bpf_object *obj, __u32 duration)
+{
+ int i, err, map_fd;
+ char str[32];
+
+ map_fd = bpf_find_map(__func__, obj, "result_string");
+ if (map_fd < 0) {
+ error_cnt++;
+ return;
+ }
+
+ struct {
+ char *name;
+ uint32_t key;
+ char str[32];
+ } tests[] = {
+ { "relocate .rodata reference", 0, "abcdefghijklmnopqrstuvwxyz" },
+ { "relocate .data reference", 1, "abcdefghijklmnopqrstuvwxyz" },
+ { "relocate .bss reference", 2, "" },
+ { "relocate .data reference", 3, "abcdexghijklmnopqrstuvwxyz" },
+ { "relocate .bss reference", 4, "\0\0hello" },
+ };
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);
+ CHECK(err || memcmp(str, tests[i].str, sizeof(str)),
+ tests[i].name, "err %d result \'%s\' expected \'%s\'\n",
+ err, str, tests[i].str);
+ }
+}
+
+struct foo {
+ __u8 a;
+ __u32 b;
+ __u64 c;
+};
+
+static void test_global_data_struct(struct bpf_object *obj, __u32 duration)
+{
+ int i, err, map_fd;
+ struct foo val;
+
+ map_fd = bpf_find_map(__func__, obj, "result_struct");
+ if (map_fd < 0) {
+ error_cnt++;
+ return;
+ }
+
+ struct {
+ char *name;
+ uint32_t key;
+ struct foo val;
+ } tests[] = {
+ { "relocate .rodata reference", 0, { 42, 0xfefeefef, 0x1111111111111111ULL, } },
+ { "relocate .bss reference", 1, { } },
+ { "relocate .rodata reference", 2, { } },
+ { "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } },
+ };
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val);
+ CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)),
+ tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n",
+ err, val.a, val.b, val.c, tests[i].val.a, tests[i].val.b, tests[i].val.c);
+ }
+}
+
+static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
+{
+ int err = -ENOMEM, map_fd, zero = 0;
+ struct bpf_map *map;
+ __u8 *buff;
+
+ map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
+ if (!map || !bpf_map__is_internal(map)) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_map__fd(map);
+ if (map_fd < 0) {
+ error_cnt++;
+ return;
+ }
+
+ buff = malloc(bpf_map__def(map)->value_size);
+ if (buff)
+ err = bpf_map_update_elem(map_fd, &zero, buff, 0);
+ free(buff);
+ CHECK(!err || errno != EPERM, "test .rodata read-only map",
+ "err %d errno %d\n", err, errno);
+}
+
+void test_global_data(void)
+{
+ const char *file = "./test_global_data.o";
+ __u32 duration = 0, retval;
+ struct bpf_object *obj;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (CHECK(err, "load program", "error %d loading %s\n", err, file))
+ return;
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "pass global data run",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ test_global_data_number(obj, duration);
+ test_global_data_string(obj, duration);
+ test_global_data_struct(obj, duration);
+ test_global_data_rdonly(obj, duration);
+
+ bpf_object__close(obj);
+}
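
The test above drives each check from a table of (section, key, expected value) rows and reads the relocated data back with bpf_map_lookup_elem(). A minimal standalone sketch of that table-driven pattern follows; the map fd, keys, and expected values are placeholders, not the selftest's fixtures.

#include <stdint.h>
#include <stdio.h>
#include <bpf/bpf.h>

/* Placeholder expectations; the real ones live in the selftest above. */
static const struct {
	uint32_t key;
	uint64_t want;
} cases[] = {
	{ 0, 0 },
	{ 1, 42 },
};

static int check_u64_map(int map_fd)
{
	uint64_t got;
	int i, err, failures = 0;

	for (i = 0; i < (int)(sizeof(cases) / sizeof(cases[0])); i++) {
		err = bpf_map_lookup_elem(map_fd, &cases[i].key, &got);
		if (err || got != cases[i].want) {
			fprintf(stderr, "key %u: err %d got %llu want %llu\n",
				cases[i].key, err,
				(unsigned long long)got,
				(unsigned long long)cases[i].want);
			failures++;
		}
	}
	return failures;
}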
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
new file mode 100644
index 000000000000..20ddca830e68
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_l4lb(const char *file)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
+
+void test_l4lb_all(void)
+{
+ const char *file1 = "./test_l4lb.o";
+ const char *file2 = "./test_l4lb_noinline.o";
+
+ test_l4lb(file1);
+ test_l4lb(file2);
+}
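
The stats check at the tail of test_l4lb() relies on per-CPU map semantics: a single lookup fills one slot per possible CPU and userspace sums them. A short sketch of that aggregation step, assuming the bpf_num_possible_cpus() helper from the selftests' bpf_util.h and a placeholder map fd.

#include <linux/types.h>
#include <bpf/bpf.h>
#include "bpf_util.h"	/* bpf_num_possible_cpus(), selftests helper */

struct vip_stats {
	__u64 bytes;
	__u64 pkts;
};

/* Sum one per-CPU element; map_fd and key are placeholders. */
static int sum_vip_stats(int map_fd, __u32 key, __u64 *bytes, __u64 *pkts)
{
	unsigned int i, nr_cpus = bpf_num_possible_cpus();
	struct vip_stats vals[nr_cpus];
	int err;

	*bytes = 0;
	*pkts = 0;
	err = bpf_map_lookup_elem(map_fd, &key, vals);
	if (err)
		return err;
	for (i = 0; i < nr_cpus; i++) {
		*bytes += vals[i].bytes;
		*pkts += vals[i].pkts;
	}
	return 0;
}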
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
new file mode 100644
index 000000000000..ee99368c595c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void *parallel_map_access(void *arg)
+{
+ int err, map_fd = *(u32 *) arg;
+ int vars[17], i, j, rnd, key = 0;
+
+ for (i = 0; i < 10000; i++) {
+ err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
+ if (err) {
+ printf("lookup failed\n");
+ error_cnt++;
+ goto out;
+ }
+ if (vars[0] != 0) {
+ printf("lookup #%d var[0]=%d\n", i, vars[0]);
+ error_cnt++;
+ goto out;
+ }
+ rnd = vars[1];
+ for (j = 2; j < 17; j++) {
+ if (vars[j] == rnd)
+ continue;
+ printf("lookup #%d var[1]=%d var[%d]=%d\n",
+ i, rnd, j, vars[j]);
+ error_cnt++;
+ goto out;
+ }
+ }
+out:
+ pthread_exit(arg);
+}
+
+void test_map_lock(void)
+{
+ const char *file = "./test_map_lock.o";
+ int prog_fd, map_fd[2], vars[17] = {};
+ pthread_t thread_id[6];
+ struct bpf_object *obj = NULL;
+ int err = 0, key = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_map_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ map_fd[0] = bpf_find_map(__func__, obj, "hash_map");
+ if (map_fd[0] < 0)
+ goto close_prog;
+ map_fd[1] = bpf_find_map(__func__, obj, "array_map");
+ if (map_fd[1] < 0)
+ goto close_prog;
+
+ bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);
+
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 4; i < 6; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &parallel_map_access, &map_fd[i - 4]) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ for (i = 4; i < 6; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&map_fd[i - 4]);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
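
parallel_map_access() only holds if BPF_F_LOCK yields a consistent snapshot: vars[0] must stay 0 while vars[1..16] all carry the same value written under the element's spin lock. A sketch of the lock-aware userspace update/lookup pair, with a hypothetical map fd and the fixed 17-word layout used above.

#include <linux/bpf.h>	/* BPF_F_LOCK */
#include <bpf/bpf.h>

/* Writer side: replace the whole 17-word value while holding the
 * element's bpf_spin_lock, so readers never observe a partial update.
 */
static int locked_update(int map_fd, int key, const int vars[17])
{
	return bpf_map_update_elem(map_fd, &key, vars, BPF_F_LOCK);
}

/* Reader side: copy out a consistent snapshot under the same lock. */
static int locked_lookup(int map_fd, int key, int vars[17])
{
	return bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
}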
diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c
new file mode 100644
index 000000000000..e178416bddad
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_obj_name(void)
+{
+ struct {
+ const char *name;
+ int success;
+ int expected_errno;
+ } tests[] = {
+ { "", 1, 0 },
+ { "_123456789ABCDE", 1, 0 },
+ { "_123456789ABCDEF", 0, EINVAL },
+ { "_123456789ABCD\n", 0, EINVAL },
+ };
+ struct bpf_insn prog[] = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ __u32 duration = 0;
+ int i;
+
+ for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
+ size_t name_len = strlen(tests[i].name) + 1;
+ union bpf_attr attr;
+ size_t ncopy;
+ int fd;
+
+ /* test different attr.prog_name during BPF_PROG_LOAD */
+ ncopy = name_len < sizeof(attr.prog_name) ?
+ name_len : sizeof(attr.prog_name);
+ bzero(&attr, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+ attr.insn_cnt = 2;
+ attr.insns = ptr_to_u64(prog);
+ attr.license = ptr_to_u64("");
+ memcpy(attr.prog_name, tests[i].name, ncopy);
+
+ fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-prog-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+
+ /* test different attr.map_name during BPF_MAP_CREATE */
+ ncopy = name_len < sizeof(attr.map_name) ?
+ name_len : sizeof(attr.map_name);
+ bzero(&attr, sizeof(attr));
+ attr.map_type = BPF_MAP_TYPE_ARRAY;
+ attr.key_size = 4;
+ attr.value_size = 4;
+ attr.max_entries = 1;
+ attr.map_flags = 0;
+ memcpy(attr.map_name, tests[i].name, ncopy);
+ fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
+ CHECK((tests[i].success && fd < 0) ||
+ (!tests[i].success && fd != -1) ||
+ (!tests[i].success && errno != tests[i].expected_errno),
+ "check-bpf-map-name",
+ "fd %d(%d) errno %d(%d)\n",
+ fd, tests[i].success, errno, tests[i].expected_errno);
+
+ if (fd != -1)
+ close(fd);
+ }
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
new file mode 100644
index 000000000000..4ecfd721a044
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_access(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv4",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
new file mode 100644
index 000000000000..ac0d43435806
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_pkt_md_access(void)
+{
+ const char *file = "./test_pkt_md_access.o";
+ struct bpf_object *obj;
+ __u32 duration, retval;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
new file mode 100644
index 000000000000..5dd89b941f53
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_prog_run_xattr(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ char buf[10];
+ int err;
+ struct bpf_prog_test_run_attr tattr = {
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = 5,
+ };
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &tattr.prog_fd);
+ if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
+ "incorrect output size, want %lu have %u\n",
+ sizeof(pkt_v4), tattr.data_size_out);
+
+ CHECK_ATTR(buf[5] != 0, "overflow",
+ "BPF_PROG_TEST_RUN ignored size hint\n");
+
+ tattr.data_out = NULL;
+ tattr.data_size_out = 0;
+ errno = 0;
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ tattr.data_size_out = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
new file mode 100644
index 000000000000..e60cd5ff1f55
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+enum {
+ QUEUE,
+ STACK,
+};
+
+static void test_queue_stack_map_by_type(int type)
+{
+ const int MAP_SIZE = 32;
+ __u32 vals[MAP_SIZE], duration, retval, size, val;
+ int i, err, prog_fd, map_in_fd, map_out_fd;
+ char file[32], buf[128];
+ struct bpf_object *obj;
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+
+ /* Fill test values to be used */
+ for (i = 0; i < MAP_SIZE; i++)
+ vals[i] = rand();
+
+ if (type == QUEUE)
+ strncpy(file, "./test_queue_map.o", sizeof(file));
+ else if (type == STACK)
+ strncpy(file, "./test_stack_map.o", sizeof(file));
+ else
+ return;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_in_fd = bpf_find_map(__func__, obj, "map_in");
+ if (map_in_fd < 0)
+ goto out;
+
+ map_out_fd = bpf_find_map(__func__, obj, "map_out");
+ if (map_out_fd < 0)
+ goto out;
+
+ /* Push 32 elements to the input map */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
+ if (err) {
+ error_cnt++;
+ goto out;
+ }
+ }
+
+ /* The eBPF program pushes iph.saddr into the output map,
+ * pops the input map and saves the popped value in iph.daddr
+ */
+ for (i = 0; i < MAP_SIZE; i++) {
+ if (type == QUEUE) {
+ val = vals[i];
+ pkt_v4.iph.saddr = vals[i] * 5;
+ } else if (type == STACK) {
+ val = vals[MAP_SIZE - 1 - i];
+ pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ if (err || retval || size != sizeof(pkt_v4) ||
+ iph->daddr != val)
+ break;
+ }
+
+ CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
+ "bpf_map_pop_elem",
+ "err %d errno %d retval %d size %d iph->daddr %u\n",
+ err, errno, retval, size, iph->daddr);
+
+ /* Queue is empty, program should return TC_ACT_SHOT */
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
+ "check-queue-stack-map-empty",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ /* Check that the program pushed elements correctly */
+ for (i = 0; i < MAP_SIZE; i++) {
+ err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
+ if (err || val != vals[i] * 5)
+ break;
+ }
+
+ CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
+ "bpf_map_push_elem", "err %d value %u\n", err, val);
+
+out:
+ pkt_v4.iph.saddr = 0;
+ bpf_object__close(obj);
+}
+
+void test_queue_stack_map(void)
+{
+ test_queue_stack_map_by_type(QUEUE);
+ test_queue_stack_map_by_type(STACK);
+}
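
The verification loop above assumes the program behaviour spelled out in the comment: pop one value from map_in into iph.daddr and push iph.saddr into map_out. A hedged sketch of what such a classifier could look like; it is illustrative only, not the actual selftest program, and the map definitions are assumptions.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") map_in = {
	.type = BPF_MAP_TYPE_QUEUE,	/* BPF_MAP_TYPE_STACK for the LIFO case */
	.key_size = 0,
	.value_size = sizeof(__u32),
	.max_entries = 32,
};

struct bpf_map_def SEC("maps") map_out = {
	.type = BPF_MAP_TYPE_QUEUE,
	.key_size = 0,
	.value_size = sizeof(__u32),
	.max_entries = 32,
};

SEC("test")
int _test(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct iphdr *iph = data + sizeof(struct ethhdr);
	__u32 value;

	if ((void *)(iph + 1) > data_end)
		return TC_ACT_SHOT;

	/* Pop the oldest (queue) or newest (stack) element; an empty map
	 * means drop, which is what the empty-map check above expects.
	 */
	if (bpf_map_pop_elem(&map_in, &value))
		return TC_ACT_SHOT;
	iph->daddr = value;

	/* Export what the packet carried so userspace can verify it. */
	value = iph->saddr;
	if (bpf_map_push_elem(&map_out, &value, 0))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";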
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
new file mode 100644
index 000000000000..9807336a3016
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/nbd.h>
+
+void test_raw_tp_writable_reject_nbd_invalid(void)
+{
+ __u32 duration = 0;
+ char error[4096];
+ int bpf_fd = -1, tp_fd = -1;
+
+ const struct bpf_insn program[] = {
+ /* r6 is our tp buffer */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* one byte beyond the end of the nbd_request struct */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6,
+ sizeof(struct nbd_request)),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .license = "GPL v2",
+ .insns = program,
+ .insns_cnt = sizeof(program) / sizeof(struct bpf_insn),
+ .log_level = 2,
+ };
+
+ bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable load",
+ "failed: %d errno %d\n", bpf_fd, errno))
+ return;
+
+ tp_fd = bpf_raw_tracepoint_open("nbd_send_request", bpf_fd);
+ if (CHECK(tp_fd >= 0, "bpf_raw_tracepoint_writable open",
+ "erroneously succeeded\n"))
+ goto out_bpffd;
+
+ close(tp_fd);
+out_bpffd:
+ close(bpf_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
new file mode 100644
index 000000000000..5c45424cac5f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <linux/nbd.h>
+
+void test_raw_tp_writable_test_run(void)
+{
+ __u32 duration = 0;
+ char error[4096];
+
+ const struct bpf_insn trace_program[] = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr load_attr = {
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .license = "GPL v2",
+ .insns = trace_program,
+ .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+ .log_level = 2,
+ };
+
+ int bpf_fd = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+ if (CHECK(bpf_fd < 0, "bpf_raw_tracepoint_writable loaded",
+ "failed: %d errno %d\n", bpf_fd, errno))
+ return;
+
+ const struct bpf_insn skb_program[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+
+ struct bpf_load_program_attr skb_load_attr = {
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .license = "GPL v2",
+ .insns = skb_program,
+ .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
+ };
+
+ int filter_fd =
+ bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+ if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
+ filter_fd, errno))
+ goto out_bpffd;
+
+ int tp_fd = bpf_raw_tracepoint_open("bpf_test_finish", bpf_fd);
+ if (CHECK(tp_fd < 0, "bpf_raw_tracepoint_writable opened",
+ "failed: %d errno %d\n", tp_fd, errno))
+ goto out_filterfd;
+
+ char test_skb[128] = {
+ 0,
+ };
+
+ __u32 prog_ret;
+ int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
+ 0, &prog_ret, 0);
+ CHECK(err != 42, "test_run",
+ "tracepoint did not modify return value\n");
+ CHECK(prog_ret != 0, "test_run_ret",
+ "socket_filter did not return 0\n");
+
+ close(tp_fd);
+
+ err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0,
+ &prog_ret, 0);
+ CHECK(err != 0, "test_run_notrace",
+ "test_run failed with %d errno %d\n", err, errno);
+ CHECK(prog_ret != 0, "test_run_ret_notrace",
+ "socket_filter did not return 0\n");
+
+out_filterfd:
+ close(filter_fd);
+out_bpffd:
+ close(bpf_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
new file mode 100644
index 000000000000..5633be43828f
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG)
+ return 0;
+
+ return vfprintf(stderr, format, args);
+}
+
+void test_reference_tracking(void)
+{
+ const char *file = "./test_sk_lookup_kern.o";
+ struct bpf_object *obj;
+ struct bpf_program *prog;
+ __u32 duration = 0;
+ int err = 0;
+
+ obj = bpf_object__open(file);
+ if (IS_ERR(obj)) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *title;
+
+ /* Ignore .text sections */
+ title = bpf_program__title(prog, false);
+ if (strstr(title, ".text") != NULL)
+ continue;
+
+ bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+
+ /* Expect verifier failure if test name has 'fail' */
+ if (strstr(title, "fail") != NULL) {
+ libbpf_set_print(NULL);
+ err = !bpf_program__load(prog, "GPL", 0);
+ libbpf_set_print(libbpf_debug_print);
+ } else {
+ err = bpf_program__load(prog, "GPL", 0);
+ }
+ CHECK(err, title, "\n");
+ }
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
new file mode 100644
index 000000000000..996e808f43a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void sigalrm_handler(int s) {}
+static struct sigaction sigalrm_action = {
+ .sa_handler = sigalrm_handler,
+};
+
+static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
+{
+ struct bpf_insn prog[4096];
+ struct itimerval timeo = {
+ .it_value.tv_usec = 100000, /* 100ms */
+ };
+ __u32 duration = 0, retval;
+ int prog_fd;
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prog); i++)
+ prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+ prog[ARRAY_SIZE(prog) - 1] = BPF_EXIT_INSN();
+
+ prog_fd = bpf_load_program(prog_type, prog, ARRAY_SIZE(prog),
+ "GPL", 0, NULL, 0);
+ CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+
+ err = sigaction(SIGALRM, &sigalrm_action, NULL);
+ CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+
+ err = setitimer(ITIMER_REAL, &timeo, NULL);
+ CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+
+ err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(duration > 500000000, /* 500ms */
+ "test-run-signal-duration",
+ "duration %dns > 500ms\n",
+ duration);
+
+ signal(SIGALRM, SIG_DFL);
+}
+
+void test_signal_pending(enum bpf_prog_type prog_type)
+{
+ test_signal_pending_by_type(BPF_PROG_TYPE_SOCKET_FILTER);
+ test_signal_pending_by_type(BPF_PROG_TYPE_FLOW_DISSECTOR);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
new file mode 100644
index 000000000000..e95baa32e277
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_skb_ctx(void)
+{
+ struct __sk_buff skb = {
+ .cb[0] = 1,
+ .cb[1] = 2,
+ .cb[2] = 3,
+ .cb[3] = 4,
+ .cb[4] = 5,
+ .priority = 6,
+ };
+ struct bpf_prog_test_run_attr tattr = {
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ .ctx_out = &skb,
+ .ctx_size_out = sizeof(skb),
+ };
+ struct bpf_object *obj;
+ int err;
+ int i;
+
+ err = bpf_prog_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &tattr.prog_fd);
+ if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ return;
+
+ /* ctx_in != NULL, ctx_size_in == 0 */
+
+ tattr.ctx_size_in = 0;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno);
+ tattr.ctx_size_in = sizeof(skb);
+
+ /* ctx_out != NULL, ctx_size_out == 0 */
+
+ tattr.ctx_size_out = 0;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno);
+ tattr.ctx_size_out = sizeof(skb);
+
+ /* non-zero [len, tc_index] fields should be rejected */
+
+ skb.len = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno);
+ skb.len = 0;
+
+ skb.tc_index = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno);
+ skb.tc_index = 0;
+
+ /* non-zero [hash, sk] fields should be rejected */
+
+ skb.hash = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno);
+ skb.hash = 0;
+
+ skb.sk = (struct bpf_sock *)1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno);
+ skb.sk = 0;
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != 0 || tattr.retval,
+ "run",
+ "err %d errno %d retval %d\n",
+ err, errno, tattr.retval);
+
+ CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
+ "ctx_size_out",
+ "incorrect output size, want %lu have %u\n",
+ sizeof(skb), tattr.ctx_size_out);
+
+ for (i = 0; i < 5; i++)
+ CHECK_ATTR(skb.cb[i] != i + 2,
+ "ctx_out_cb",
+ "skb->cb[i] == %d, expected %d\n",
+ skb.cb[i], i + 2);
+ CHECK_ATTR(skb.priority != 7,
+ "ctx_out_priority",
+ "skb->priority == %d, expected %d\n",
+ skb.priority, 7);
+}
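
The expectations skb.cb[i] == i + 2 and skb.priority == 7 imply the loaded program increments each cb[] slot and the priority once. A sketch of such a classifier, assumed rather than taken from the test_skb_ctx.o source:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("skb_ctx")
int process(struct __sk_buff *skb)
{
	int i;

	/* Bump every scratch slot and the priority once; with the inputs
	 * above this yields cb[i] == i + 2 and priority == 7.
	 */
	#pragma clang loop unroll(full)
	for (i = 0; i < 5; i++)
		skb->cb[i]++;
	skb->priority++;

	return 0;
}

char _license[] SEC("license") = "GPL";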
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
new file mode 100644
index 000000000000..114ebe6a438e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_spinlock(void)
+{
+ const char *file = "./test_spin_lock.o";
+ pthread_t thread_id[4];
+ struct bpf_object *obj = NULL;
+ int prog_fd;
+ int err = 0, i;
+ void *ret;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+ if (err) {
+ printf("test_spin_lock:bpf_prog_load errno %d\n", errno);
+ goto close_prog;
+ }
+ for (i = 0; i < 4; i++)
+ assert(pthread_create(&thread_id[i], NULL,
+ &spin_lock_thread, &prog_fd) == 0);
+ for (i = 0; i < 4; i++)
+ assert(pthread_join(thread_id[i], &ret) == 0 &&
+ ret == (void *)&prog_fd);
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
new file mode 100644
index 000000000000..3aab2b083c71
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_build_id(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto out;
+
+ /* Get the ID for the random/urandom_read tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/random/urandom_read/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ /* Open the perf event and attach bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("./urandom_read") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH
+ * sizeof(struct bpf_stack_build_id);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno);
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+
+out:
+ return;
+}
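
compare_map_keys() is a test_progs helper not shown in this patch; it backs the "every element in stackid_hmap has a counterpart in stackmap" check. A sketch of how such a helper can be written with bpf_map_get_next_key(); the __u32 key type and the 1 KiB value buffer are assumptions, not the helper's actual interface.

#include <linux/types.h>
#include <bpf/bpf.h>

/* Succeeds only if every key present in map_a can be looked up in map_b.
 * A 1 KiB value buffer is assumed to be large enough for map_b's values.
 */
static int keys_are_subset(int map_a, int map_b)
{
	__u32 key, next_key;
	char value[1024];
	int err;

	err = bpf_map_get_next_key(map_a, NULL, &next_key);
	while (!err) {
		if (bpf_map_lookup_elem(map_b, &next_key, value))
			return -1;	/* key missing from map_b */
		key = next_key;
		err = bpf_map_get_next_key(map_a, &key, &next_key);
	}
	return 0;
}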
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
new file mode 100644
index 000000000000..1c1a2f75f3d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static __u64 read_perf_max_sample_freq(void)
+{
+ __u64 sample_freq = 5000; /* fallback to 5000 on error */
+ FILE *f;
+
+ f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
+ if (f == NULL)
+ return sample_freq;
+ fscanf(f, "%llu", &sample_freq);
+ fclose(f);
+ return sample_freq;
+}
+
+void test_stacktrace_build_id_nmi(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_build_id.o";
+ int err, pmu_fd, prog_fd;
+ struct perf_event_attr attr = {
+ .freq = 1,
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ };
+ __u32 key, previous_key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+ int i, j;
+ struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
+ int build_id_matches = 0;
+ int retry = 1;
+
+ attr.sample_freq = read_perf_max_sample_freq();
+
+retry:
+ err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open",
+ "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+ err, errno))
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
+ == 0);
+ assert(system("taskset 0x1 ./urandom_read 100000") == 0);
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = extract_build_id(buf, 256);
+
+ if (CHECK(err, "get build_id with readelf",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu;
+
+ err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ if (CHECK(err, "get_next_key from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+
+ do {
+ char build_id[64];
+
+ err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ if (CHECK(err, "lookup_elem from stackmap",
+ "err %d, errno %d\n", err, errno))
+ goto disable_pmu;
+ for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
+ if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
+ id_offs[i].offset != 0) {
+ for (j = 0; j < 20; ++j)
+ sprintf(build_id + 2 * j, "%02x",
+ id_offs[i].build_id[j] & 0xff);
+ if (strstr(buf, build_id) != NULL)
+ build_id_matches = 1;
+ }
+ previous_key = key;
+ } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
+ if (CHECK(build_id_matches < 1, "build id match",
+ "Didn't find expected build ID from the map\n"))
+ goto disable_pmu;
+
+ /*
+ * We intentionally skip compare_stack_ips(). This is because we
+ * only support one in_nmi() ips-to-build_id translation per cpu
+ * at any time, thus stack_amap here will always fall back to
+ * BPF_STACK_BUILD_ID_IP.
+ */
+
+disable_pmu:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+ close(pmu_fd);
+
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
new file mode 100644
index 000000000000..2bfd50a0d6d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
+ struct perf_event_attr attr = {};
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ return;
+
+ /* Get the ID for the sched/sched_switch tracepoint */
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (bytes <= 0 || bytes >= sizeof(buf))
+ goto close_prog;
+
+ /* Open the perf event and attach bpf program */
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd, errno))
+ goto close_prog;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ goto disable_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (err)
+ goto disable_pmu;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto disable_pmu;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto disable_pmu;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto disable_pmu;
+
+ stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
+ if (stack_amap_fd < 0)
+ goto disable_pmu;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
+ err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
+ if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
+ "err %d errno %d\n", err, errno))
+ goto disable_pmu_noerr;
+
+ goto disable_pmu_noerr;
+disable_pmu:
+ error_cnt++;
+disable_pmu_noerr:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+close_prog:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
new file mode 100644
index 000000000000..1f8387d80fd7
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_stacktrace_map_raw_tp(void)
+{
+ int control_map_fd, stackid_hmap_fd, stackmap_fd;
+ const char *file = "./test_stacktrace_map.o";
+ int efd, err, prog_fd;
+ __u32 key, val, duration = 0;
+ struct bpf_object *obj;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* find map fds */
+ control_map_fd = bpf_find_map(__func__, obj, "control_map");
+ if (control_map_fd < 0)
+ goto close_prog;
+
+ stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+ if (stackid_hmap_fd < 0)
+ goto close_prog;
+
+ stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+ if (stackmap_fd < 0)
+ goto close_prog;
+
+ /* give the bpf program some time to run */
+ sleep(1);
+
+ /* disable stack trace collection */
+ key = 0;
+ val = 1;
+ bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+ /* for every element in stackid_hmap, we can find a corresponding one
+ * in stackmap, and vice versa.
+ */
+ err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+ if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+ if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
new file mode 100644
index 000000000000..958a3d88de99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_task_fd_query_rawtp(void)
+{
+ const char *file = "./test_get_stack_rawtp.o";
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj;
+ int efd, err, prog_fd;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+ return;
+
+ efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+ if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+
+ /* query (getpid(), efd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_prog;
+
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ strcmp(buf, "sys_enter") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_prog;
+
+ /* test zero len */
+ len = 0;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test empty buffer */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
+ err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter");
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ /* test smaller buffer */
+ len = 3;
+ err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
+ "err %d errno %d\n", err, errno))
+ goto close_prog;
+ err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+ len == strlen("sys_enter") &&
+ strcmp(buf, "sy") == 0;
+ if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+ goto close_prog;
+
+ goto close_prog_noerr;
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
new file mode 100644
index 000000000000..f9b70e81682b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_task_fd_query_tp_core(const char *probe_name,
+ const char *tp_name)
+{
+ const char *file = "./test_tracepoint.o";
+ int err, bytes, efd, prog_fd, pmu_fd;
+ struct perf_event_attr attr = {};
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ struct bpf_object *obj = NULL;
+ __u32 duration = 0;
+ char buf[256];
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ goto close_prog;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
+ "bytes %d errno %d\n", bytes, errno))
+ goto close_prog;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ /* query (getpid(), pmu_fd) */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+ if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+ errno))
+ goto close_pmu;
+
+ err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
+ if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+ fd_type, buf))
+ goto close_pmu;
+
+ close(pmu_fd);
+ goto close_prog_noerr;
+
+close_pmu:
+ close(pmu_fd);
+close_prog:
+ error_cnt++;
+close_prog_noerr:
+ bpf_object__close(obj);
+}
+
+void test_task_fd_query_tp(void)
+{
+ test_task_fd_query_tp_core("sched/sched_switch",
+ "sched_switch");
+ test_task_fd_query_tp_core("syscalls/sys_enter_read",
+ "sys_enter_read");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_estats.c b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
new file mode 100644
index 000000000000..bb8759d69099
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_estats.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tcp_estats(void)
+{
+ const char *file = "./test_tcp_estats.o";
+ int err, prog_fd;
+ struct bpf_object *obj;
+ __u32 duration = 0;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+ CHECK(err, "", "err %d errno %d\n", err, errno);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
new file mode 100644
index 000000000000..fb095e5cd9af
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tp_attach_query(void)
+{
+ const int num_progs = 3;
+ int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+ __u32 duration = 0, info_len, saved_prog_ids[num_progs];
+ const char *file = "./test_tracepoint.o";
+ struct perf_event_query_bpf *query;
+ struct perf_event_attr attr = {};
+ struct bpf_object *obj[num_progs];
+ struct bpf_prog_info prog_info;
+ char buf[256];
+
+ for (i = 0; i < num_progs; i++)
+ obj[i] = NULL;
+
+ snprintf(buf, sizeof(buf),
+ "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+ efd = open(buf, O_RDONLY, 0);
+ if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+ return;
+ bytes = read(efd, buf, sizeof(buf));
+ close(efd);
+ if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+ "read", "bytes %d errno %d\n", bytes, errno))
+ return;
+
+ attr.config = strtol(buf, NULL, 0);
+ attr.type = PERF_TYPE_TRACEPOINT;
+ attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
+
+ query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+ for (i = 0; i < num_progs; i++) {
+ err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+ &prog_fd[i]);
+ if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+ goto cleanup1;
+
+ bzero(&prog_info, sizeof(prog_info));
+ prog_info.jited_prog_len = 0;
+ prog_info.xlated_prog_len = 0;
+ prog_info.nr_map_ids = 0;
+ info_len = sizeof(prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+ if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+ err, errno))
+ goto cleanup1;
+ saved_prog_ids[i] = prog_info.id;
+
+ pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+ 0 /* cpu 0 */, -1 /* group id */,
+ 0 /* flags */);
+ if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+ pmu_fd[i], errno))
+ goto cleanup2;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+ if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 0) {
+ /* check NULL prog array query */
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 0,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+ if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+ err, errno))
+ goto cleanup3;
+
+ if (i == 1) {
+ /* try to get # of programs only */
+ query->ids_len = 0;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+
+ /* try a few negative tests */
+ /* invalid query pointer */
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+ (struct perf_event_query_bpf *)0x1);
+ if (CHECK(!err || errno != EFAULT,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d\n", err, errno))
+ goto cleanup3;
+
+ /* not enough space */
+ query->ids_len = 1;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ }
+
+ query->ids_len = num_progs;
+ err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+ if (CHECK(err || query->prog_cnt != (i + 1),
+ "perf_event_ioc_query_bpf",
+ "err %d errno %d query->prog_cnt %u\n",
+ err, errno, query->prog_cnt))
+ goto cleanup3;
+ for (j = 0; j < i + 1; j++)
+ if (CHECK(saved_prog_ids[j] != query->ids[j],
+ "perf_event_ioc_query_bpf",
+ "#%d saved_prog_id %x query prog_id %x\n",
+ j, saved_prog_ids[j], query->ids[j]))
+ goto cleanup3;
+ }
+
+ i = num_progs - 1;
+ for (; i >= 0; i--) {
+ cleanup3:
+ ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+ close(pmu_fd[i]);
+ cleanup1:
+ bpf_object__close(obj[i]);
+ }
+ free(query);
+}
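
PERF_EVENT_IOC_QUERY_BPF is exercised above in two modes: ids_len == 0 to learn prog_cnt only, then a buffer sized for the expected count. A sketch of that two-step query against an already-open perf event fd; error handling is minimal and the helper name is hypothetical.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>	/* struct perf_event_query_bpf */

/* Returns a malloc'ed query result (caller frees), or NULL on error. */
static struct perf_event_query_bpf *query_attached_progs(int perf_fd)
{
	struct perf_event_query_bpf count = { .ids_len = 0 };
	struct perf_event_query_bpf *q;

	/* First pass: with ids_len == 0 the kernel only fills prog_cnt,
	 * as the "# of programs only" check above demonstrates.
	 */
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, &count))
		return NULL;

	q = calloc(1, sizeof(*q) + sizeof(__u32) * count.prog_cnt);
	if (!q)
		return NULL;
	q->ids_len = count.prog_cnt;

	/* Second pass: retrieve the program ids themselves. */
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q)) {
		free(q);
		return NULL;
	}
	return q;
}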
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c
new file mode 100644
index 000000000000..a74167289545
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp(void)
+{
+ struct vip key4 = {.protocol = 6, .family = AF_INET};
+ struct vip key6 = {.protocol = 6, .family = AF_INET6};
+ struct iptnl_info value4 = {.family = AF_INET};
+ struct iptnl_info value6 = {.family = AF_INET6};
+ const char *file = "./test_xdp.o";
+ struct bpf_object *obj;
+ char buf[128];
+ struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
+ struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
+ __u32 duration, retval, size;
+ int err, prog_fd, map_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip2tnl");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key4, &value4, 0);
+ bpf_map_update_elem(map_fd, &key6, &value6, 0);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_TX || size != 74 ||
+ iph->protocol != IPPROTO_IPIP, "ipv4",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 114 ||
+ iph6->nexthdr != IPPROTO_IPV6, "ipv6",
+ "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
new file mode 100644
index 000000000000..922aa0a19764
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_adjust_tail(void)
+{
+ const char *file = "./test_adjust_tail.o";
+ struct bpf_object *obj;
+ char buf[128];
+ __u32 duration, retval, size;
+ int err, prog_fd;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+
+ CHECK(err || retval != XDP_DROP,
+ "ipv4", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+
+ err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != XDP_TX || size != 54,
+ "ipv6", "err %d errno %d retval %d size %d\n",
+ err, errno, retval, size);
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
new file mode 100644
index 000000000000..09e6b46f5515
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_xdp_noinline(void)
+{
+ const char *file = "./test_xdp_noinline.o";
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct vip key = {.protocol = 6};
+ struct vip_meta {
+ __u32 flags;
+ __u32 vip_num;
+ } value = {.vip_num = VIP_NUM};
+ __u32 stats_key = VIP_NUM;
+ struct vip_stats {
+ __u64 bytes;
+ __u64 pkts;
+ } stats[nr_cpus];
+ struct real_definition {
+ union {
+ __be32 dst;
+ __be32 dstv6[4];
+ };
+ __u8 flags;
+ } real_def = {.dst = MAGIC_VAL};
+ __u32 ch_key = 11, real_num = 3;
+ __u32 duration, retval, size;
+ int err, i, prog_fd, map_fd;
+ __u64 bytes = 0, pkts = 0;
+ struct bpf_object *obj;
+ char buf[128];
+ u32 *magic = (u32 *)buf;
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+ if (err) {
+ error_cnt++;
+ return;
+ }
+
+ map_fd = bpf_find_map(__func__, obj, "vip_map");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &key, &value, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "ch_rings");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+ map_fd = bpf_find_map(__func__, obj, "reals");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 54 ||
+ *magic != MAGIC_VAL, "ipv4",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ buf, &size, &retval, &duration);
+ CHECK(err || retval != 1 || size != 74 ||
+ *magic != MAGIC_VAL, "ipv6",
+ "err %d errno %d retval %d size %d magic %x\n",
+ err, errno, retval, size, *magic);
+
+ map_fd = bpf_find_map(__func__, obj, "stats");
+ if (map_fd < 0)
+ goto out;
+ bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ for (i = 0; i < nr_cpus; i++) {
+ bytes += stats[i].bytes;
+ pkts += stats[i].pkts;
+ }
+ if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+ error_cnt++;
+ printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+ }
+out:
+ bpf_object__close(obj);
+}
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..81ad9a0b29d0 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -64,6 +64,25 @@ struct bpf_map_def SEC("maps") jmp_table = {
.max_entries = 8
};
+struct bpf_map_def SEC("maps") last_dissection = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_flow_keys),
+ .max_entries = 1,
+};
+
+static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
+ int ret)
+{
+ struct bpf_flow_keys *val;
+ __u32 key = 0;
+
+ val = bpf_map_lookup_elem(&last_dissection, &key);
+ if (val)
+ memcpy(val, keys, sizeof(*val));
+ return ret;
+}
+
static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
__u16 hdr_size,
void *buffer)
@@ -92,7 +111,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
{
struct bpf_flow_keys *keys = skb->flow_keys;
- keys->n_proto = proto;
switch (proto) {
case bpf_htons(ETH_P_IP):
bpf_tail_call(skb, &jmp_table, IP);
@@ -110,19 +128,18 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
break;
default:
/* Protocol not supported */
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
SEC("flow_dissector")
int _dissect(struct __sk_buff *skb)
{
- if (!skb->vlan_present)
- return parse_eth_proto(skb, skb->protocol);
- else
- return parse_eth_proto(skb, skb->vlan_proto);
+ struct bpf_flow_keys *keys = skb->flow_keys;
+
+ return parse_eth_proto(skb, keys->n_proto);
}
/* Parses on IPPROTO_* */
@@ -141,8 +158,8 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_ICMP:
icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
if (!icmp)
- return BPF_DROP;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_DROP);
+ return export_flow_keys(keys, BPF_OK);
case IPPROTO_IPIP:
keys->is_encap = true;
return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
@@ -152,11 +169,11 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_GRE:
gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
if (!gre)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (bpf_htons(gre->flags & GRE_VERSION))
/* Only inspect standard GRE packets with version 0 */
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
if (GRE_IS_CSUM(gre->flags))
@@ -172,7 +189,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
&_eth);
if (!eth)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*eth);
@@ -183,31 +200,31 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
case IPPROTO_TCP:
tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
if (!tcp)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (tcp->doff < 5)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->sport = tcp->source;
keys->dport = tcp->dest;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
if (!udp)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->sport = udp->source;
keys->dport = udp->dest;
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
default:
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
@@ -227,7 +244,7 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
return parse_ip_proto(skb, nexthdr);
}
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
}
PROG(IP)(struct __sk_buff *skb)
@@ -240,11 +257,11 @@ PROG(IP)(struct __sk_buff *skb)
iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
if (!iph)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
/* IP header cannot be smaller than 20 bytes */
if (iph->ihl < 5)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IP;
keys->ipv4_src = iph->saddr;
@@ -252,7 +269,7 @@ PROG(IP)(struct __sk_buff *skb)
keys->thoff += iph->ihl << 2;
if (data + keys->thoff > data_end)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
keys->is_frag = true;
@@ -266,7 +283,7 @@ PROG(IP)(struct __sk_buff *skb)
}
if (done)
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
return parse_ip_proto(skb, iph->protocol);
}
@@ -278,7 +295,7 @@ PROG(IPV6)(struct __sk_buff *skb)
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->addr_proto = ETH_P_IPV6;
memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
@@ -290,11 +307,12 @@ PROG(IPV6)(struct __sk_buff *skb)
PROG(IPV6OP)(struct __sk_buff *skb)
{
+ struct bpf_flow_keys *keys = skb->flow_keys;
struct ipv6_opt_hdr *ip6h, _ip6h;
ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
if (!ip6h)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
/* hlen is in 8-octets and does not include the first 8 bytes
* of the header
@@ -311,7 +329,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
if (!fragh)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
keys->thoff += sizeof(*fragh);
keys->is_frag = true;
@@ -323,48 +341,46 @@ PROG(IPV6FR)(struct __sk_buff *skb)
PROG(MPLS)(struct __sk_buff *skb)
{
+ struct bpf_flow_keys *keys = skb->flow_keys;
struct mpls_label *mpls, _mpls;
mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
if (!mpls)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
- return BPF_OK;
+ return export_flow_keys(keys, BPF_OK);
}
PROG(VLAN)(struct __sk_buff *skb)
{
struct bpf_flow_keys *keys = skb->flow_keys;
struct vlan_hdr *vlan, _vlan;
- __be16 proto;
-
- /* Peek back to see if single or double-tagging */
- if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
- sizeof(proto)))
- return BPF_DROP;
/* Account for double-tagging */
- if (proto == bpf_htons(ETH_P_8021AD)) {
+ if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
+ keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
}
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
+ keys->nhoff += sizeof(*vlan);
keys->thoff += sizeof(*vlan);
/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
- return BPF_DROP;
+ return export_flow_keys(keys, BPF_DROP);
+ keys->n_proto = vlan->h_vlan_encapsulated_proto;
return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
}
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index 1fd244d35ba9..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/progs/connect6_prog.c
index 26397ab7b3c7..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect6_prog.c
diff --git a/tools/testing/selftests/bpf/dev_cgroup.c b/tools/testing/selftests/bpf/progs/dev_cgroup.c
index ce41a3475f27..ce41a3475f27 100644
--- a/tools/testing/selftests/bpf/dev_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/dev_cgroup.c
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
index 014dba10b8a5..014dba10b8a5 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index 9f741e69cebe..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
diff --git a/tools/testing/selftests/bpf/sample_map_ret0.c b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
index 0756303676ac..0756303676ac 100644
--- a/tools/testing/selftests/bpf/sample_map_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_map_ret0.c
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/progs/sample_ret0.c
index fec99750d6ea..fec99750d6ea 100644
--- a/tools/testing/selftests/bpf/sample_ret0.c
+++ b/tools/testing/selftests/bpf/progs/sample_ret0.c
diff --git a/tools/testing/selftests/bpf/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
index a91536b1c47e..a91536b1c47e 100644
--- a/tools/testing/selftests/bpf/sendmsg4_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..5aeaa284fc47 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
diff --git a/tools/testing/selftests/bpf/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 9ff8ac4b0bf6..9ff8ac4b0bf6 100644
--- a/tools/testing/selftests/bpf/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index 0f92858f6226..0f92858f6226 100644
--- a/tools/testing/selftests/bpf/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index 12a7b5c82ed6..12a7b5c82ed6 100644
--- a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index 2ce7634a4012..2ce7634a4012 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
diff --git a/tools/testing/selftests/bpf/test_adjust_tail.c b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
index 4cd5e860c903..4cd5e860c903 100644
--- a/tools/testing/selftests/bpf/test_adjust_tail.c
+++ b/tools/testing/selftests/bpf/progs/test_adjust_tail.c
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index e5c79fe0ffdb..e5c79fe0ffdb 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
diff --git a/tools/testing/selftests/bpf/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..434188c37774 100644
--- a/tools/testing/selftests/bpf/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
diff --git a/tools/testing/selftests/bpf/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f6d9f238e00a..f6d9f238e00a 100644
--- a/tools/testing/selftests/bpf/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
new file mode 100644
index 000000000000..5ab14e941980
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Isovalent, Inc.
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <string.h>
+
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") result_number = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = 11,
+};
+
+struct bpf_map_def SEC("maps") result_string = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = 32,
+ .max_entries = 5,
+};
+
+struct foo {
+ __u8 a;
+ __u32 b;
+ __u64 c;
+};
+
+struct bpf_map_def SEC("maps") result_struct = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct foo),
+ .max_entries = 5,
+};
+
+/* Relocation tests for __u64s. */
+static __u64 num0;
+static __u64 num1 = 42;
+static const __u64 num2 = 24;
+static __u64 num3 = 0;
+static __u64 num4 = 0xffeeff;
+static const __u64 num5 = 0xabab;
+static const __u64 num6 = 0xab;
+
+/* Relocation tests for strings. */
+static const char str0[32] = "abcdefghijklmnopqrstuvwxyz";
+static char str1[32] = "abcdefghijklmnopqrstuvwxyz";
+static char str2[32];
+
+/* Relocation tests for structs. */
+static const struct foo struct0 = {
+ .a = 42,
+ .b = 0xfefeefef,
+ .c = 0x1111111111111111ULL,
+};
+static struct foo struct1;
+static const struct foo struct2;
+static struct foo struct3 = {
+ .a = 41,
+ .b = 0xeeeeefef,
+ .c = 0x2111111111111111ULL,
+};
+
+#define test_reloc(map, num, var) \
+ do { \
+ __u32 key = num; \
+ bpf_map_update_elem(&result_##map, &key, var, 0); \
+ } while (0)
+
+SEC("static_data_load")
+int load_static_data(struct __sk_buff *skb)
+{
+ static const __u64 bar = ~0;
+
+ test_reloc(number, 0, &num0);
+ test_reloc(number, 1, &num1);
+ test_reloc(number, 2, &num2);
+ test_reloc(number, 3, &num3);
+ test_reloc(number, 4, &num4);
+ test_reloc(number, 5, &num5);
+ num4 = 1234;
+ test_reloc(number, 6, &num4);
+ test_reloc(number, 7, &num0);
+ test_reloc(number, 8, &num6);
+
+ test_reloc(string, 0, str0);
+ test_reloc(string, 1, str1);
+ test_reloc(string, 2, str2);
+ str1[5] = 'x';
+ test_reloc(string, 3, str1);
+ __builtin_memcpy(&str2[2], "hello", sizeof("hello"));
+ test_reloc(string, 4, str2);
+
+ test_reloc(struct, 0, &struct0);
+ test_reloc(struct, 1, &struct1);
+ test_reloc(struct, 2, &struct2);
+ test_reloc(struct, 3, &struct3);
+
+ test_reloc(number, 9, &struct0.c);
+ test_reloc(number, 10, &bar);
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
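load_static_data() exercises global and static data relocation: each variable lives in .data, .rodata or .bss, and the program copies it into an ordinary array map so userspace can check that the loader wired those sections up correctly (e.g. slot 1 of result_number must read back 42, and slot 6 must see the in-program update to 1234). A sketch of one such check:

#include <bpf/bpf.h>
#include <linux/types.h>

/* Compare one result_number slot against its expected value (sketch). */
static int check_number_slot(int map_fd, __u32 slot, __u64 expected)
{
	__u64 got = 0;

	if (bpf_map_lookup_elem(map_fd, &slot, &got))
		return -1;
	return got == expected ? 0 : -1;
}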
diff --git a/tools/testing/selftests/bpf/progs/test_jhash.h b/tools/testing/selftests/bpf/progs/test_jhash.h
new file mode 100644
index 000000000000..3d12c11a8d47
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_jhash.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+typedef unsigned int u32;
+
+static __attribute__((always_inline)) u32 rol32(u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> ((-shift) & 31));
+}
+
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+#define JHASH_INITVAL 0xdeadbeef
+
+static ATTR
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const unsigned char *k = key;
+
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ while (length > 12) {
+ a += *(volatile u32 *)(k);
+ b += *(volatile u32 *)(k + 4);
+ c += *(volatile u32 *)(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ c ^= a;
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
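test_jhash.h deliberately leaves ATTR undefined: the including program picks the linkage, so the same Jenkins hash can be built either fully inlined or as a separate function reached through a BPF-to-BPF call (and the switch falls through on purpose to mix in the 1..12 tail bytes). A hedged usage sketch from the includer's side:

/* The includer chooses the linkage; noinline forces a real BPF-to-BPF call. */
#define ATTR __attribute__((noinline))
#include "test_jhash.h"

struct two_words { unsigned int a, b; };

static unsigned int hash_two_words(const struct two_words *w)
{
	return jhash(w, sizeof(*w), 0);	/* 8-byte key, seed 0 */
}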
diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 1e10c9590991..1e10c9590991 100644
--- a/tools/testing/selftests/bpf/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index ba44a14e6dc4..ba44a14e6dc4 100644
--- a/tools/testing/selftests/bpf/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
index 4147130cc3b7..4147130cc3b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_lirc_mode2_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
new file mode 100644
index 000000000000..c957d6dfe6d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_ip_encap.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct grehdr {
+ __be16 flags;
+ __be16 protocol;
+};
+
+SEC("encap_gre")
+int bpf_lwt_encap_gre(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct iphdr iph;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.iph.ihl = 5;
+ hdr.iph.version = 4;
+ hdr.iph.ttl = 0x40;
+ hdr.iph.protocol = 47; /* IPPROTO_GRE */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ hdr.iph.saddr = 0x640110ac; /* 172.16.1.100 */
+ hdr.iph.daddr = 0x641010ac; /* 172.16.16.100 */
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ hdr.iph.saddr = 0xac100164; /* 172.16.1.100 */
+ hdr.iph.daddr = 0xac101064; /* 172.16.16.100 */
+#else
+#error "Fix your compiler's __BYTE_ORDER__?!"
+#endif
+ hdr.iph.tot_len = bpf_htons(skb->len + sizeof(struct encap_hdr));
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+SEC("encap_gre6")
+int bpf_lwt_encap_gre6(struct __sk_buff *skb)
+{
+ struct encap_hdr {
+ struct ipv6hdr ip6hdr;
+ struct grehdr greh;
+ } hdr;
+ int err;
+
+ memset(&hdr, 0, sizeof(struct encap_hdr));
+
+ hdr.ip6hdr.version = 6;
+ hdr.ip6hdr.payload_len = bpf_htons(skb->len + sizeof(struct grehdr));
+ hdr.ip6hdr.nexthdr = 47; /* IPPROTO_GRE */
+ hdr.ip6hdr.hop_limit = 0x40;
+ /* fb01::1 */
+ hdr.ip6hdr.saddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.saddr.s6_addr[1] = 1;
+ hdr.ip6hdr.saddr.s6_addr[15] = 1;
+ /* fb10::1 */
+ hdr.ip6hdr.daddr.s6_addr[0] = 0xfb;
+ hdr.ip6hdr.daddr.s6_addr[1] = 0x10;
+ hdr.ip6hdr.daddr.s6_addr[15] = 1;
+
+ hdr.greh.protocol = skb->protocol;
+
+ err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
+ sizeof(struct encap_hdr));
+ if (err)
+ return BPF_DROP;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
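The #if on __BYTE_ORDER__ exists only because the IPv4 addresses are written as raw 32-bit constants: 0x640110ac is 172.16.1.100 already byte-swapped for a little-endian host, while the big-endian branch stores the natural 0xac100164. A byte-order-independent way to express the same assignments, sketched with bpf_htonl() from the already-included bpf_endian.h, would be:

	/* 172.16.1.100 and 172.16.16.100, converted at compile time */
	hdr.iph.saddr = bpf_htonl(0xac100164);
	hdr.iph.daddr = bpf_htonl(0xac101064);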
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index 0575751bc1bc..0575751bc1bc 100644
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index ce923e67e08e..2985f262846e 100644
--- a/tools/testing/selftests/bpf/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -27,6 +27,7 @@ SEC("xdp_mimtest")
int xdp_mimtest0(struct xdp_md *ctx)
{
int value = 123;
+ int *value_p;
int key = 0;
void *map;
@@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx)
return XDP_DROP;
bpf_map_update_elem(map, &key, &value, 0);
+ value_p = bpf_map_lookup_elem(map, &key);
+ if (!value_p || *value_p != 123)
+ return XDP_DROP;
map = bpf_map_lookup_elem(&mim_hash, &key);
if (!map)
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
new file mode 100644
index 000000000000..af8cc68ed2f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+#define VAR_NUM 16
+
+struct hmap_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
+
+struct array_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct bpf_map_def SEC("maps") array_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct array_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
+
+SEC("map_lock_demo")
+int bpf_map_lock_test(struct __sk_buff *skb)
+{
+ struct hmap_elem zero = {}, *val;
+ int rnd = bpf_get_prandom_u32();
+ int key = 0, err = 1, i;
+ struct array_elem *q;
+
+ val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!val)
+ goto err;
+ /* spin_lock in hash map */
+ bpf_spin_lock(&val->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ val->var[i] = rnd;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array */
+ q = bpf_map_lookup_elem(&array_map, &key);
+ if (!q)
+ goto err;
+ bpf_spin_lock(&q->lock);
+ for (i = 0; i < VAR_NUM; i++)
+ q->var[i] = rnd;
+ bpf_spin_unlock(&q->lock);
+ err = 0;
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
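bpf_map_lock_test() writes one random value into all VAR_NUM slots while holding the element's bpf_spin_lock, so a reader that honours the lock must never observe a partially updated element. A sketch of the reader-side consistency check from userspace, assuming libbpf's bpf_map_lookup_elem_flags() with BPF_F_LOCK:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Copy the element out under the lock and verify it is internally
 * consistent: every var[] slot must carry the same value (sketch).
 */
static int check_consistent(int map_fd)
{
	struct { struct bpf_spin_lock lock; int var[16]; } elem;
	int key = 0, i;

	if (bpf_map_lookup_elem_flags(map_fd, &key, &elem, BPF_F_LOCK))
		return -1;
	for (i = 1; i < 16; i++)
		if (elem.var[i] != elem.var[0])
			return -1;
	return 0;
}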
diff --git a/tools/testing/selftests/bpf/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c
index 880d2963b472..726340fa6fe0 100644
--- a/tools/testing/selftests/bpf/test_obj_id.c
+++ b/tools/testing/selftests/bpf/progs/test_obj_id.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <linux/bpf.h>
diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 6e11ba11709e..7cf42d14103f 100644
--- a/tools/testing/selftests/bpf/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <string.h>
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
index 7956302ecdf2..3d039e18bf82 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_md_access.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <stddef.h>
#include <string.h>
diff --git a/tools/testing/selftests/bpf/test_queue_map.c b/tools/testing/selftests/bpf/progs/test_queue_map.c
index 87db1f9da33d..87db1f9da33d 100644
--- a/tools/testing/selftests/bpf/test_queue_map.c
+++ b/tools/testing/selftests/bpf/progs/test_queue_map.c
diff --git a/tools/testing/selftests/bpf/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 5b54ec637ada..5b54ec637ada 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index e21cd736c196..e21cd736c196 100644
--- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
index 68cf9829f5a7..68cf9829f5a7 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
new file mode 100644
index 000000000000..7a80960d7df1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
+
+SEC("skb_ctx")
+int process(struct __sk_buff *skb)
+{
+ #pragma clang loop unroll(full)
+ for (int i = 0; i < 5; i++) {
+ if (skb->cb[i] != i + 1)
+ return 1;
+ skb->cb[i]++;
+ }
+ skb->priority++;
+
+ return 0;
+}
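The program assumes skb->cb[0..4] arrive pre-seeded with 1..5, increments each slot and bumps skb->priority, so the interesting part is the runner: BPF_PROG_TEST_RUN has to be given an input context, not just packet bytes. A sketch of that userspace side, assuming libbpf's bpf_prog_test_run_xattr() with its ctx_in/ctx_out fields:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Run the skb_ctx program with a caller-provided __sk_buff (sketch). */
static int run_with_ctx(int prog_fd, void *pkt, __u32 pkt_len)
{
	struct __sk_buff ctx = {};
	struct bpf_prog_test_run_attr attr = {
		.prog_fd	= prog_fd,
		.data_in	= pkt,
		.data_size_in	= pkt_len,
		.ctx_in		= &ctx,
		.ctx_size_in	= sizeof(ctx),
		.ctx_out	= &ctx,
		.ctx_size_out	= sizeof(ctx),
	};
	int i;

	for (i = 0; i < 5; i++)
		ctx.cb[i] = i + 1;	/* what the program expects to find */

	if (bpf_prog_test_run_xattr(&attr) || attr.retval)
		return -1;
	return ctx.cb[0] == 2 ? 0 : -1;	/* each slot comes back incremented */
}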
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
new file mode 100644
index 000000000000..1c39e4ccb7f1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <linux/bpf.h>
+#include <netinet/in.h>
+#include <stdbool.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+enum bpf_addr_array_idx {
+ ADDR_SRV_IDX,
+ ADDR_CLI_IDX,
+ __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+ EGRESS_SRV_IDX,
+ EGRESS_CLI_IDX,
+ INGRESS_LISTEN_IDX,
+ __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+ EGRESS_LINUM_IDX,
+ INGRESS_LINUM_IDX,
+ __NR_BPF_LINUM_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") addr_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct sockaddr_in6),
+ .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_sock),
+ .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") tcp_sock_result_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct bpf_tcp_sock),
+ .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+struct bpf_map_def SEC("maps") linum_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
+};
+
+struct bpf_spinlock_cnt {
+ struct bpf_spin_lock lock;
+ __u32 cnt;
+};
+
+struct bpf_map_def SEC("maps") sk_pkt_out_cnt = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_spinlock_cnt),
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
+BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt);
+
+struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_spinlock_cnt),
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
+BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt);
+
+static bool is_loopback6(__u32 *a6)
+{
+ return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
+}
+
+static void skcpy(struct bpf_sock *dst,
+ const struct bpf_sock *src)
+{
+ dst->bound_dev_if = src->bound_dev_if;
+ dst->family = src->family;
+ dst->type = src->type;
+ dst->protocol = src->protocol;
+ dst->mark = src->mark;
+ dst->priority = src->priority;
+ dst->src_ip4 = src->src_ip4;
+ dst->src_ip6[0] = src->src_ip6[0];
+ dst->src_ip6[1] = src->src_ip6[1];
+ dst->src_ip6[2] = src->src_ip6[2];
+ dst->src_ip6[3] = src->src_ip6[3];
+ dst->src_port = src->src_port;
+ dst->dst_ip4 = src->dst_ip4;
+ dst->dst_ip6[0] = src->dst_ip6[0];
+ dst->dst_ip6[1] = src->dst_ip6[1];
+ dst->dst_ip6[2] = src->dst_ip6[2];
+ dst->dst_ip6[3] = src->dst_ip6[3];
+ dst->dst_port = src->dst_port;
+ dst->state = src->state;
+}
+
+static void tpcpy(struct bpf_tcp_sock *dst,
+ const struct bpf_tcp_sock *src)
+{
+ dst->snd_cwnd = src->snd_cwnd;
+ dst->srtt_us = src->srtt_us;
+ dst->rtt_min = src->rtt_min;
+ dst->snd_ssthresh = src->snd_ssthresh;
+ dst->rcv_nxt = src->rcv_nxt;
+ dst->snd_nxt = src->snd_nxt;
+ dst->snd_una = src->snd_una;
+ dst->mss_cache = src->mss_cache;
+ dst->ecn_flags = src->ecn_flags;
+ dst->rate_delivered = src->rate_delivered;
+ dst->rate_interval_us = src->rate_interval_us;
+ dst->packets_out = src->packets_out;
+ dst->retrans_out = src->retrans_out;
+ dst->total_retrans = src->total_retrans;
+ dst->segs_in = src->segs_in;
+ dst->data_segs_in = src->data_segs_in;
+ dst->segs_out = src->segs_out;
+ dst->data_segs_out = src->data_segs_out;
+ dst->lost_out = src->lost_out;
+ dst->sacked_out = src->sacked_out;
+ dst->bytes_received = src->bytes_received;
+ dst->bytes_acked = src->bytes_acked;
+}
+
+#define RETURN { \
+ linum = __LINE__; \
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
+ return 1; \
+}
+
+SEC("cgroup_skb/egress")
+int egress_read_sock_fields(struct __sk_buff *skb)
+{
+ struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
+ __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
+ struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
+ struct sockaddr_in6 *srv_sa6, *cli_sa6;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ __u32 linum, linum_idx;
+
+ linum_idx = EGRESS_LINUM_IDX;
+
+ sk = skb->sk;
+ if (!sk || sk->state == 10)
+ RETURN;
+
+ sk = bpf_sk_fullsock(sk);
+ if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
+ !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx);
+ if (!srv_sa6 || !cli_sa6)
+ RETURN;
+
+ if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
+ result_idx = EGRESS_SRV_IDX;
+ else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
+ result_idx = EGRESS_CLI_IDX;
+ else
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ if (result_idx == EGRESS_SRV_IDX) {
+ /* Userspace has already created this storage for the srv sk */
+ pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0, 0);
+ pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, sk,
+ 0, 0);
+ } else {
+ pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
+ &cli_cnt_init,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10,
+ sk, &cli_cnt_init,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ }
+
+ if (!pkt_out_cnt || !pkt_out_cnt10)
+ RETURN;
+
+ /* Even though both cnt and cnt10 have a lock defined in their BTF,
+ * one is intentionally updated with the lock held and the other without,
+ * as a test of the spinlock support in BPF_MAP_TYPE_SK_STORAGE.
+ */
+ pkt_out_cnt->cnt += 1;
+ bpf_spin_lock(&pkt_out_cnt10->lock);
+ pkt_out_cnt10->cnt += 10;
+ bpf_spin_unlock(&pkt_out_cnt10->lock);
+
+ RETURN;
+}
+
+SEC("cgroup_skb/ingress")
+int ingress_read_sock_fields(struct __sk_buff *skb)
+{
+ __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
+ struct bpf_tcp_sock *tp, *tp_ret;
+ struct bpf_sock *sk, *sk_ret;
+ struct sockaddr_in6 *srv_sa6;
+ __u32 linum, linum_idx;
+
+ linum_idx = INGRESS_LINUM_IDX;
+
+ sk = skb->sk;
+ if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
+ RETURN;
+
+ srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+ if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
+ RETURN;
+
+ if (sk->state != 10 && sk->state != 12)
+ RETURN;
+
+ sk = bpf_get_listener_sock(sk);
+ if (!sk)
+ RETURN;
+
+ tp = bpf_tcp_sock(sk);
+ if (!tp)
+ RETURN;
+
+ sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+ tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+ if (!sk_ret || !tp_ret)
+ RETURN;
+
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
+
+ RETURN;
+}
+
+char _license[] SEC("license") = "GPL";
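Every early exit goes through the RETURN macro, which records __LINE__ in linum_map before returning, so when the checker finds unexpected bpf_sock or bpf_tcp_sock contents it can report exactly which bail-out fired (the bare 10 and 12 compared against sk->state are TCP_LISTEN and TCP_NEW_SYN_RECV). A sketch of reading the recorded line back:

#include <bpf/bpf.h>
#include <linux/types.h>

/* Report the source line at which the egress program last bailed (sketch). */
static __u32 last_egress_linum(int linum_map_fd)
{
	__u32 idx = 0;	/* EGRESS_LINUM_IDX in the program above */
	__u32 linum = 0;

	bpf_map_lookup_elem(linum_map_fd, &idx, &linum);
	return linum;
}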
diff --git a/tools/testing/selftests/bpf/test_sockhash_kern.c b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
index e6755916442a..e6755916442a 100644
--- a/tools/testing/selftests/bpf/test_sockhash_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockhash_kern.c
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.c b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
index 677b2ed1cc1e..677b2ed1cc1e 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
new file mode 100644
index 000000000000..40f904312090
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+struct hmap_elem {
+ volatile int cnt;
+ struct bpf_spin_lock lock;
+ int test_padding;
+};
+
+struct bpf_map_def SEC("maps") hmap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct hmap_elem),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
+
+
+struct cls_elem {
+ struct bpf_spin_lock lock;
+ volatile int cnt;
+};
+
+struct bpf_map_def SEC("maps") cls_map = {
+ .type = BPF_MAP_TYPE_CGROUP_STORAGE,
+ .key_size = sizeof(struct bpf_cgroup_storage_key),
+ .value_size = sizeof(struct cls_elem),
+};
+
+BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
+ struct cls_elem);
+
+struct bpf_vqueue {
+ struct bpf_spin_lock lock;
+ /* 4 byte hole */
+ unsigned long long lasttime;
+ int credit;
+ unsigned int rate;
+};
+
+struct bpf_map_def SEC("maps") vqueue = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct bpf_vqueue),
+ .max_entries = 1,
+};
+
+BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
+#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
+
+SEC("spin_lock_demo")
+int bpf_sping_lock_test(struct __sk_buff *skb)
+{
+ volatile int credit = 0, max_credit = 100, pkt_len = 64;
+ struct hmap_elem zero = {}, *val;
+ unsigned long long curtime;
+ struct bpf_vqueue *q;
+ struct cls_elem *cls;
+ int key = 0;
+ int err = 0;
+
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ bpf_map_update_elem(&hmap, &key, &zero, 0);
+ val = bpf_map_lookup_elem(&hmap, &key);
+ if (!val) {
+ err = 1;
+ goto err;
+ }
+ }
+ /* spin_lock in hash map run time test */
+ bpf_spin_lock(&val->lock);
+ if (val->cnt)
+ val->cnt--;
+ else
+ val->cnt++;
+ if (val->cnt != 0 && val->cnt != 1)
+ err = 1;
+ bpf_spin_unlock(&val->lock);
+
+ /* spin_lock in array. virtual queue demo */
+ q = bpf_map_lookup_elem(&vqueue, &key);
+ if (!q)
+ goto err;
+ curtime = bpf_ktime_get_ns();
+ bpf_spin_lock(&q->lock);
+ q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
+ q->lasttime = curtime;
+ if (q->credit > max_credit)
+ q->credit = max_credit;
+ q->credit -= pkt_len;
+ credit = q->credit;
+ bpf_spin_unlock(&q->lock);
+
+ /* spin_lock in cgroup local storage */
+ cls = bpf_get_local_storage(&cls_map, 0);
+ bpf_spin_lock(&cls->lock);
+ cls->cnt++;
+ bpf_spin_unlock(&cls->lock);
+
+err:
+ return err;
+}
+char _license[] SEC("license") = "GPL";
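The virtual-queue branch refills credit with CREDIT_PER_NS(delta, rate) = (delta * rate) >> 20, a shift instead of a division, which amounts to treating rate as bytes per 2^20 ns (about 1.05 ms). The test never depends on real units, but under that reading the arithmetic looks like this sketch:

	/* assumption: rate is in bytes per 2^20 ns (~1.05 ms) */
	unsigned long long delta_ns = 1000000;		/* 1 ms elapsed */
	unsigned int rate = 1000;
	long long credit = (delta_ns * rate) >> 20;	/* ~953 bytes refilled */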
diff --git a/tools/testing/selftests/bpf/test_stack_map.c b/tools/testing/selftests/bpf/progs/test_stack_map.c
index 31c3880e6da0..31c3880e6da0 100644
--- a/tools/testing/selftests/bpf/test_stack_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stack_map.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index d86c281e957f..d86c281e957f 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index af111af7ca1a..af111af7ca1a 100644
--- a/tools/testing/selftests/bpf/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
new file mode 100644
index 000000000000..a295cad805d7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stdint.h>
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+#include "bpf_util.h"
+
+/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
+#define MAX_ULONG_STR_LEN 0xF
+
+/* Max supported length of sysctl value string (pow2). */
+#define MAX_VALUE_STR_LEN 0x40
+
+static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+{
+ char tcp_mem_name[] = "net/ipv4/tcp_mem";
+ unsigned char i;
+ char name[64];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+ ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
+ if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
+ return 0;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < sizeof(tcp_mem_name); ++i)
+ if (name[i] != tcp_mem_name[i])
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sysctl")
+int sysctl_tcp_mem(struct bpf_sysctl *ctx)
+{
+ unsigned long tcp_mem[3] = {0, 0, 0};
+ char value[MAX_VALUE_STR_LEN];
+ unsigned char i, off = 0;
+ int ret;
+
+ if (ctx->write)
+ return 0;
+
+ if (!is_tcp_mem(ctx))
+ return 0;
+
+ ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
+ if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
+ return 0;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
+ ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
+ tcp_mem + i);
+ if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
+ return 0;
+ off += ret & MAX_ULONG_STR_LEN;
+ }
+
+
+ return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
+}
+
+char _license[] SEC("license") = "GPL";
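Two details in sysctl_tcp_mem() are easy to miss: bpf_strtoul() returns the number of characters it consumed, so anything outside (0, MAX_ULONG_STR_LEN] is rejected, and the advance is applied as off += ret & MAX_ULONG_STR_LEN so the verifier can prove that value + off never leaves the MAX_VALUE_STR_LEN buffer. The bounding idiom in isolation, as a sketch (parse_one() is a hypothetical stand-in for the bpf_strtoul() call):

	char value[0x40];		/* MAX_VALUE_STR_LEN */
	unsigned char off = 0;
	int ret;

	ret = parse_one(value + off);	/* hypothetical parser, like bpf_strtoul() */
	if (ret <= 0 || ret > 0xF)	/* reject errors and over-long numbers */
		return 0;
	off += ret & 0xF;		/* verifier now bounds each step by 15 */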
diff --git a/tools/testing/selftests/bpf/progs/test_tc_edt.c b/tools/testing/selftests/bpf/progs/test_tc_edt.c
new file mode 100644
index 000000000000..3af64c470d64
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_edt.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/pkt_cls.h>
+#include <linux/tcp.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+/* the maximum delay we are willing to add (drop packets beyond that) */
+#define TIME_HORIZON_NS (2000 * 1000 * 1000)
+#define NS_PER_SEC 1000000000
+#define ECN_HORIZON_NS 5000000
+#define THROTTLE_RATE_BPS (5 * 1000 * 1000)
+
+/* flow_key => last_tstamp timestamp used */
+struct bpf_map_def SEC("maps") flow_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(uint32_t),
+ .value_size = sizeof(uint64_t),
+ .max_entries = 1,
+};
+
+static inline int throttle_flow(struct __sk_buff *skb)
+{
+ int key = 0;
+ uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
+ uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
+ THROTTLE_RATE_BPS;
+ uint64_t now = bpf_ktime_get_ns();
+ uint64_t tstamp, next_tstamp = 0;
+
+ if (last_tstamp)
+ next_tstamp = *last_tstamp + delay_ns;
+
+ tstamp = skb->tstamp;
+ if (tstamp < now)
+ tstamp = now;
+
+ /* should we throttle? */
+ if (next_tstamp <= tstamp) {
+ if (bpf_map_update_elem(&flow_map, &key, &tstamp, BPF_ANY))
+ return TC_ACT_SHOT;
+ return TC_ACT_OK;
+ }
+
+ /* do not queue past the time horizon */
+ if (next_tstamp - now >= TIME_HORIZON_NS)
+ return TC_ACT_SHOT;
+
+ /* set ecn bit, if needed */
+ if (next_tstamp - now >= ECN_HORIZON_NS)
+ bpf_skb_ecn_set_ce(skb);
+
+ if (bpf_map_update_elem(&flow_map, &key, &next_tstamp, BPF_EXIST))
+ return TC_ACT_SHOT;
+ skb->tstamp = next_tstamp;
+
+ return TC_ACT_OK;
+}
+
+static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
+{
+ void *data_end = (void *)(long)skb->data_end;
+
+ /* drop malformed packets */
+ if ((void *)(tcp + 1) > data_end)
+ return TC_ACT_SHOT;
+
+ if (tcp->dest == bpf_htons(9000))
+ return throttle_flow(skb);
+
+ return TC_ACT_OK;
+}
+
+static inline int handle_ipv4(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph;
+ uint32_t ihl;
+
+ /* drop malformed packets */
+ if (data + sizeof(struct ethhdr) > data_end)
+ return TC_ACT_SHOT;
+ iph = (struct iphdr *)(data + sizeof(struct ethhdr));
+ if ((void *)(iph + 1) > data_end)
+ return TC_ACT_SHOT;
+ ihl = iph->ihl * 4;
+ if (((void *)iph) + ihl > data_end)
+ return TC_ACT_SHOT;
+
+ if (iph->protocol == IPPROTO_TCP)
+ return handle_tcp(skb, (struct tcphdr *)(((void *)iph) + ihl));
+
+ return TC_ACT_OK;
+}
+
+SEC("cls_test") int tc_prog(struct __sk_buff *skb)
+{
+ if (skb->protocol == bpf_htons(ETH_P_IP))
+ return handle_ipv4(skb);
+
+ return TC_ACT_OK;
+}
+
+char __license[] SEC("license") = "GPL";
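throttle_flow() is earliest-departure-time pacing: each packet's tstamp is pushed len * NS_PER_SEC / THROTTLE_RATE_BPS beyond the previous one, packets that would land more than TIME_HORIZON_NS in the future are dropped, and anything past ECN_HORIZON_NS is CE-marked instead of delayed further. Reading THROTTLE_RATE_BPS as bytes per second (an assumption based on the name and on skb->len being in bytes), the numbers work out as in this sketch:

	/* assumption: 5,000,000 bytes/s rate, 1500-byte frame */
	unsigned long long delay_ns = 1500ULL * 1000000000 / 5000000;
	/* = 300,000 ns: a full-size frame advances the schedule by 0.3 ms,
	 * so the 2 s horizon admits roughly 6,600 queued full-size frames.
	 */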
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
new file mode 100644
index 000000000000..74370e7e286d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* In-place tunneling */
+
+#include <stdbool.h>
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mpls.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/pkt_cls.h>
+#include <linux/types.h>
+
+#include "bpf_endian.h"
+#include "bpf_helpers.h"
+
+static const int cfg_port = 8000;
+
+static const int cfg_udp_src = 20000;
+
+#define UDP_PORT 5555
+#define MPLS_OVER_UDP_PORT 6635
+#define ETH_OVER_UDP_PORT 7777
+
+/* MPLS label 1000 with S bit (last label) set and ttl of 255. */
+static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 |
+ MPLS_LS_S_MASK | 0xff);
+
+struct gre_hdr {
+ __be16 flags;
+ __be16 protocol;
+} __attribute__((packed));
+
+union l4hdr {
+ struct udphdr udp;
+ struct gre_hdr gre;
+};
+
+struct v4hdr {
+ struct iphdr ip;
+ union l4hdr l4hdr;
+ __u8 pad[16]; /* enough space for L2 header */
+} __attribute__((packed));
+
+struct v6hdr {
+ struct ipv6hdr ip;
+ union l4hdr l4hdr;
+ __u8 pad[16]; /* enough space for L2 header */
+} __attribute__((packed));
+
+static __always_inline void set_ipv4_csum(struct iphdr *iph)
+{
+ __u16 *iph16 = (__u16 *)iph;
+ __u32 csum;
+ int i;
+
+ iph->check = 0;
+
+#pragma clang loop unroll(full)
+ for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++)
+ csum += *iph16++;
+
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+}
+
+static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto)
+{
+ __u16 udp_dst = UDP_PORT;
+ struct iphdr iph_inner;
+ struct v4hdr h_outer;
+ struct tcphdr tcph;
+ int olen, l2_len;
+ int tcp_off;
+ __u64 flags;
+
+ /* Most tests encapsulate a packet into a tunnel with the same
+ * network protocol, and derive the outer header fields from
+ * the inner header.
+ *
+ * The 6in4 case tests different inner and outer protocols. As
+ * the inner is ipv6, but the outer expects an ipv4 header as
+ * input, manually build a struct iphdr based on the ipv6hdr.
+ */
+ if (encap_proto == IPPROTO_IPV6) {
+ const __u32 saddr = (192 << 24) | (168 << 16) | (1 << 8) | 1;
+ const __u32 daddr = (192 << 24) | (168 << 16) | (1 << 8) | 2;
+ struct ipv6hdr iph6_inner;
+
+ /* Read the IPv6 header */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph6_inner,
+ sizeof(iph6_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* Derive the IPv4 header fields from the IPv6 header */
+ memset(&iph_inner, 0, sizeof(iph_inner));
+ iph_inner.version = 4;
+ iph_inner.ihl = 5;
+ iph_inner.tot_len = bpf_htons(sizeof(iph6_inner) +
+ bpf_ntohs(iph6_inner.payload_len));
+ iph_inner.ttl = iph6_inner.hop_limit - 1;
+ iph_inner.protocol = iph6_inner.nexthdr;
+ iph_inner.saddr = __bpf_constant_htonl(saddr);
+ iph_inner.daddr = __bpf_constant_htonl(daddr);
+
+ tcp_off = sizeof(iph6_inner);
+ } else {
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ tcp_off = sizeof(iph_inner);
+ }
+
+ /* filter only packets we want */
+ if (iph_inner.ihl != 5 || iph_inner.protocol != IPPROTO_TCP)
+ return TC_ACT_OK;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + tcp_off,
+ &tcph, sizeof(tcph)) < 0)
+ return TC_ACT_OK;
+
+ if (tcph.dest != __bpf_constant_htons(cfg_port))
+ return TC_ACT_OK;
+
+ olen = sizeof(h_outer.ip);
+ l2_len = 0;
+
+ flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
+
+ switch (l2_proto) {
+ case ETH_P_MPLS_UC:
+ l2_len = sizeof(mpls_label);
+ udp_dst = MPLS_OVER_UDP_PORT;
+ break;
+ case ETH_P_TEB:
+ l2_len = ETH_HLEN;
+ udp_dst = ETH_OVER_UDP_PORT;
+ break;
+ }
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
+
+ switch (encap_proto) {
+ case IPPROTO_GRE:
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
+ olen += sizeof(h_outer.l4hdr.gre);
+ h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
+ h_outer.l4hdr.gre.flags = 0;
+ break;
+ case IPPROTO_UDP:
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
+ olen += sizeof(h_outer.l4hdr.udp);
+ h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
+ h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
+ h_outer.l4hdr.udp.check = 0;
+ h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
+ sizeof(h_outer.l4hdr.udp) +
+ l2_len);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ break;
+ default:
+ return TC_ACT_OK;
+ }
+
+ /* add L2 encap (if specified) */
+ switch (l2_proto) {
+ case ETH_P_MPLS_UC:
+ *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ break;
+ case ETH_P_TEB:
+ if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
+ ETH_HLEN))
+ return TC_ACT_SHOT;
+ break;
+ }
+ olen += l2_len;
+
+ /* add room between mac and network header */
+ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
+ return TC_ACT_SHOT;
+
+ /* prepare new outer network header */
+ h_outer.ip = iph_inner;
+ h_outer.ip.tot_len = bpf_htons(olen +
+ bpf_ntohs(h_outer.ip.tot_len));
+ h_outer.ip.protocol = encap_proto;
+
+ set_ipv4_csum((void *)&h_outer.ip);
+
+ /* store new outer network header */
+ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
+ BPF_F_INVALIDATE_HASH) < 0)
+ return TC_ACT_SHOT;
+
+ /* if changing outer proto type, update eth->h_proto */
+ if (encap_proto == IPPROTO_IPV6) {
+ struct ethhdr eth;
+
+ if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
+ return TC_ACT_SHOT;
+ eth.h_proto = bpf_htons(ETH_P_IP);
+ if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0)
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto,
+ __u16 l2_proto)
+{
+ __u16 udp_dst = UDP_PORT;
+ struct ipv6hdr iph_inner;
+ struct v6hdr h_outer;
+ struct tcphdr tcph;
+ int olen, l2_len;
+ __u16 tot_len;
+ __u64 flags;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner,
+ sizeof(iph_inner)) < 0)
+ return TC_ACT_OK;
+
+ /* filter only packets we want */
+ if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(iph_inner),
+ &tcph, sizeof(tcph)) < 0)
+ return TC_ACT_OK;
+
+ if (tcph.dest != __bpf_constant_htons(cfg_port))
+ return TC_ACT_OK;
+
+ olen = sizeof(h_outer.ip);
+ l2_len = 0;
+
+ flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6;
+
+ switch (l2_proto) {
+ case ETH_P_MPLS_UC:
+ l2_len = sizeof(mpls_label);
+ udp_dst = MPLS_OVER_UDP_PORT;
+ break;
+ case ETH_P_TEB:
+ l2_len = ETH_HLEN;
+ udp_dst = ETH_OVER_UDP_PORT;
+ break;
+ }
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L2(l2_len);
+
+ switch (encap_proto) {
+ case IPPROTO_GRE:
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
+ olen += sizeof(h_outer.l4hdr.gre);
+ h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto);
+ h_outer.l4hdr.gre.flags = 0;
+ break;
+ case IPPROTO_UDP:
+ flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
+ olen += sizeof(h_outer.l4hdr.udp);
+ h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
+ h_outer.l4hdr.udp.dest = bpf_htons(udp_dst);
+ tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner) +
+ sizeof(h_outer.l4hdr.udp);
+ h_outer.l4hdr.udp.check = 0;
+ h_outer.l4hdr.udp.len = bpf_htons(tot_len);
+ break;
+ case IPPROTO_IPV6:
+ break;
+ default:
+ return TC_ACT_OK;
+ }
+
+ /* add L2 encap (if specified) */
+ switch (l2_proto) {
+ case ETH_P_MPLS_UC:
+ *((__u32 *)((__u8 *)&h_outer + olen)) = mpls_label;
+ break;
+ case ETH_P_TEB:
+ if (bpf_skb_load_bytes(skb, 0, (__u8 *)&h_outer + olen,
+ ETH_HLEN))
+ return TC_ACT_SHOT;
+ break;
+ }
+ olen += l2_len;
+
+ /* add room between mac and network header */
+ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags))
+ return TC_ACT_SHOT;
+
+ /* prepare new outer network header */
+ h_outer.ip = iph_inner;
+ h_outer.ip.payload_len = bpf_htons(olen +
+ bpf_ntohs(h_outer.ip.payload_len));
+
+ h_outer.ip.nexthdr = encap_proto;
+
+ /* store new outer network header */
+ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen,
+ BPF_F_INVALIDATE_HASH) < 0)
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
+SEC("encap_ipip_none")
+int __encap_ipip_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_gre_none")
+int __encap_gre_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_gre_mpls")
+int __encap_gre_mpls(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_gre_eth")
+int __encap_gre_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_GRE, ETH_P_TEB);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_udp_none")
+int __encap_udp_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_udp_mpls")
+int __encap_udp_mpls(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_udp_eth")
+int __encap_udp_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IP))
+ return encap_ipv4(skb, IPPROTO_UDP, ETH_P_TEB);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_sit_none")
+int __encap_sit_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv4(skb, IPPROTO_IPV6, ETH_P_IP);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6tnl_none")
+int __encap_ip6tnl_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6gre_none")
+int __encap_ip6gre_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6gre_mpls")
+int __encap_ip6gre_mpls(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6gre_eth")
+int __encap_ip6gre_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_GRE, ETH_P_TEB);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_none")
+int __encap_ip6udp_none(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_mpls")
+int __encap_ip6udp_mpls(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC);
+ else
+ return TC_ACT_OK;
+}
+
+SEC("encap_ip6udp_eth")
+int __encap_ip6udp_eth(struct __sk_buff *skb)
+{
+ if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6))
+ return encap_ipv6(skb, IPPROTO_UDP, ETH_P_TEB);
+ else
+ return TC_ACT_OK;
+}
+
+static int decap_internal(struct __sk_buff *skb, int off, int len, char proto)
+{
+ char buf[sizeof(struct v6hdr)];
+ struct gre_hdr greh;
+ struct udphdr udph;
+ int olen = len;
+
+ switch (proto) {
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ break;
+ case IPPROTO_GRE:
+ olen += sizeof(struct gre_hdr);
+ if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0)
+ return TC_ACT_OK;
+ switch (bpf_ntohs(greh.protocol)) {
+ case ETH_P_MPLS_UC:
+ olen += sizeof(mpls_label);
+ break;
+ case ETH_P_TEB:
+ olen += ETH_HLEN;
+ break;
+ }
+ break;
+ case IPPROTO_UDP:
+ olen += sizeof(struct udphdr);
+ if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0)
+ return TC_ACT_OK;
+ switch (bpf_ntohs(udph.dest)) {
+ case MPLS_OVER_UDP_PORT:
+ olen += sizeof(mpls_label);
+ break;
+ case ETH_OVER_UDP_PORT:
+ olen += ETH_HLEN;
+ break;
+ }
+ break;
+ default:
+ return TC_ACT_OK;
+ }
+
+ if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC,
+ BPF_F_ADJ_ROOM_FIXED_GSO))
+ return TC_ACT_SHOT;
+
+ return TC_ACT_OK;
+}
+
+static int decap_ipv4(struct __sk_buff *skb)
+{
+ struct iphdr iph_outer;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
+ sizeof(iph_outer)) < 0)
+ return TC_ACT_OK;
+
+ if (iph_outer.ihl != 5)
+ return TC_ACT_OK;
+
+ return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
+ iph_outer.protocol);
+}
+
+static int decap_ipv6(struct __sk_buff *skb)
+{
+ struct ipv6hdr iph_outer;
+
+ if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_outer,
+ sizeof(iph_outer)) < 0)
+ return TC_ACT_OK;
+
+ return decap_internal(skb, ETH_HLEN, sizeof(iph_outer),
+ iph_outer.nexthdr);
+}
+
+SEC("decap")
+int decap_f(struct __sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case __bpf_constant_htons(ETH_P_IP):
+ return decap_ipv4(skb);
+ case __bpf_constant_htons(ETH_P_IPV6):
+ return decap_ipv6(skb);
+ default:
+ /* does not match, ignore */
+ return TC_ACT_OK;
+ }
+}
+
+char __license[] SEC("license") = "GPL";
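The heart of encap_ipv4()/encap_ipv6() is the flag word handed to bpf_skb_adjust_room(): BPF_ADJ_ROOM_MAC opens olen bytes between the MAC and network headers, BPF_F_ADJ_ROOM_FIXED_GSO keeps the GSO segment size untouched, and the ENCAP_L3/L4/L2 flags tell the stack what kind of tunnel header is about to be written so offloads stay consistent. A stripped-down sketch of the same sequence for plain IPIP (no L4 or L2 header, reusing the file's set_ipv4_csum()):

	struct iphdr inner, outer;
	__u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;

	if (bpf_skb_load_bytes(skb, ETH_HLEN, &inner, sizeof(inner)) < 0)
		return TC_ACT_OK;
	/* make room for one more IPv4 header behind the MAC header */
	if (bpf_skb_adjust_room(skb, sizeof(outer), BPF_ADJ_ROOM_MAC, flags))
		return TC_ACT_SHOT;
	outer = inner;
	outer.protocol = IPPROTO_IPIP;
	outer.tot_len = bpf_htons(sizeof(outer) + bpf_ntohs(inner.tot_len));
	set_ipv4_csum(&outer);
	if (bpf_skb_store_bytes(skb, ETH_HLEN, &outer, sizeof(outer),
				BPF_F_INVALIDATE_HASH) < 0)
		return TC_ACT_SHOT;
	return TC_ACT_OK;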
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
new file mode 100644
index 000000000000..1ab095bcacd8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+// Copyright (c) 2019 Cloudflare
+
+#include <string.h>
+
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <sys/socket.h>
+#include <linux/tcp.h>
+
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+struct bpf_map_def SEC("maps") results = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = 1,
+};
+
+static __always_inline void check_syncookie(void *ctx, void *data,
+ void *data_end)
+{
+ struct bpf_sock_tuple tup;
+ struct bpf_sock *sk;
+ struct ethhdr *ethh;
+ struct iphdr *ipv4h;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *tcph;
+ int ret;
+ __u32 key = 0;
+ __u64 value = 1;
+
+ ethh = data;
+ if (ethh + 1 > data_end)
+ return;
+
+ switch (bpf_ntohs(ethh->h_proto)) {
+ case ETH_P_IP:
+ ipv4h = data + sizeof(struct ethhdr);
+ if (ipv4h + 1 > data_end)
+ return;
+
+ if (ipv4h->ihl != 5)
+ return;
+
+ tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
+ if (tcph + 1 > data_end)
+ return;
+
+ tup.ipv4.saddr = ipv4h->saddr;
+ tup.ipv4.daddr = ipv4h->daddr;
+ tup.ipv4.sport = tcph->source;
+ tup.ipv4.dport = tcph->dest;
+
+ sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
+ BPF_F_CURRENT_NETNS, 0);
+ if (!sk)
+ return;
+
+ if (sk->state != BPF_TCP_LISTEN)
+ goto release;
+
+ ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
+ tcph, sizeof(*tcph));
+ break;
+
+ case ETH_P_IPV6:
+ ipv6h = data + sizeof(struct ethhdr);
+ if (ipv6h + 1 > data_end)
+ return;
+
+ if (ipv6h->nexthdr != IPPROTO_TCP)
+ return;
+
+ tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
+ if (tcph + 1 > data_end)
+ return;
+
+ memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
+ memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
+ tup.ipv6.sport = tcph->source;
+ tup.ipv6.dport = tcph->dest;
+
+ sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
+ BPF_F_CURRENT_NETNS, 0);
+ if (!sk)
+ return;
+
+ if (sk->state != BPF_TCP_LISTEN)
+ goto release;
+
+ ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
+ tcph, sizeof(*tcph));
+ break;
+
+ default:
+ return;
+ }
+
+ if (ret == 0)
+ bpf_map_update_elem(&results, &key, &value, 0);
+
+release:
+ bpf_sk_release(sk);
+}
+
+SEC("clsact/check_syncookie")
+int check_syncookie_clsact(struct __sk_buff *skb)
+{
+ check_syncookie(skb, (void *)(long)skb->data,
+ (void *)(long)skb->data_end);
+ return TC_ACT_OK;
+}
+
+SEC("xdp/check_syncookie")
+int check_syncookie_xdp(struct xdp_md *ctx)
+{
+ check_syncookie(ctx, (void *)(long)ctx->data,
+ (void *)(long)ctx->data_end);
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
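
[Editor's sketch, not part of the patch: once check_syncookie_clsact or check_syncookie_xdp is attached, user space can tell whether bpf_tcp_check_syncookie() accepted a cookie by reading slot 0 of the "results" map. The map fd here is assumed to come from libbpf, e.g. bpf_object__find_map_fd_by_name().]

#include <bpf/bpf.h>
#include <linux/types.h>

static int syncookie_was_accepted(int results_map_fd)
{
	__u32 key = 0;
	__u64 value = 0;

	if (bpf_map_lookup_elem(results_map_fd, &key, &value) < 0)
		return -1;
	/* check_syncookie() stores 1 when bpf_tcp_check_syncookie() returns 0 */
	return value == 1;
}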
diff --git a/tools/testing/selftests/bpf/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index bee3bbecc0c4..bee3bbecc0c4 100644
--- a/tools/testing/selftests/bpf/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index 74f73b33a7b0..c7c3240e0dd4 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
+#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
@@ -9,7 +10,6 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
-#include <netinet/in.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "test_tcpbpf.h"
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index edbca203ce2d..ec6db6e64c41 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <string.h>
+#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
@@ -9,7 +10,6 @@
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/tcp.h>
-#include <netinet/in.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "test_tcpnotify.h"
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c
index 04bf084517e0..04bf084517e0 100644
--- a/tools/testing/selftests/bpf/test_tracepoint.c
+++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c
diff --git a/tools/testing/selftests/bpf/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index 504df69c83df..504df69c83df 100644
--- a/tools/testing/selftests/bpf/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c
new file mode 100644
index 000000000000..f3236ce35f31
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((noinline))
+#include "test_jhash.h"
+
+SEC("scale90_noinline")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ void *ptr;
+ int ret = 0, nh_off, i = 0;
+
+ nh_off = 14;
+
+ /* pragma unroll doesn't work on large loops */
+
+#define C do { \
+ ptr = data + i; \
+ if (ptr + nh_off > data_end) \
+ break; \
+ ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+ } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+ C30;C30;C30; /* 90 calls */
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c
new file mode 100644
index 000000000000..77830693eccb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale2.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((always_inline))
+#include "test_jhash.h"
+
+SEC("scale90_inline")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ void *ptr;
+ int ret = 0, nh_off, i = 0;
+
+ nh_off = 14;
+
+ /* pragma unroll doesn't work on large loops */
+
+#define C do { \
+ ptr = data + i; \
+ if (ptr + nh_off > data_end) \
+ break; \
+ ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+ } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+ C30;C30;C30; /* 90 calls */
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c
new file mode 100644
index 000000000000..1848da04ea41
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#define ATTR __attribute__((noinline))
+#include "test_jhash.h"
+
+SEC("scale90_noinline32")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ void *ptr;
+ int ret = 0, nh_off, i = 0;
+
+ nh_off = 32;
+
+ /* pragma unroll doesn't work on large loops */
+
+#define C do { \
+ ptr = data + i; \
+ if (ptr + nh_off > data_end) \
+ break; \
+ ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
+ } while (0);
+#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
+ C30;C30;C30; /* 90 calls */
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
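
[Editor's note, not part of the patch: test_verif_scale1/2/3 differ mainly in whether jhash() is forced inline via ATTR and in the probed header length nh_off (14 vs 32 bytes). Each 'C' above expands to an independent guarded statement, roughly the fragment below (using balancer_ingress()'s locals); the break leaves only that copy's own do/while(0), so all 90 copies are still walked by the verifier, which is what these scale tests exercise.]

	do {
		ptr = data + i;
		if (ptr + nh_off > data_end)
			break;	/* skips only this one copy */
		ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++);
	} while (0);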
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 5e7df8bb5b5d..5e7df8bb5b5d 100644
--- a/tools/testing/selftests/bpf/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 8d0182650653..8d0182650653 100644
--- a/tools/testing/selftests/bpf/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 5e4aac74f9d0..5e4aac74f9d0 100644
--- a/tools/testing/selftests/bpf/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
index ef9e704be140..ef9e704be140 100644
--- a/tools/testing/selftests/bpf/test_xdp_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
index 365a7d2d9f5c..365a7d2d9f5c 100644
--- a/tools/testing/selftests/bpf/test_xdp_vlan.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/progs/xdp_dummy.c
index 43b0ef1001ed..43b0ef1001ed 100644
--- a/tools/testing/selftests/bpf/xdp_dummy.c
+++ b/tools/testing/selftests/bpf/progs/xdp_dummy.c
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
index 7f8200a8702b..a53ed58528d6 100755
--- a/tools/testing/selftests/bpf/tcp_client.py
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -30,12 +30,11 @@ def send(sock, s):
serverPort = int(sys.argv[1])
-HostName = socket.gethostname()
# create active socket
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
- sock.connect((HostName, serverPort))
+ sock.connect(('localhost', serverPort))
except socket.error as e:
sys.exit(1)
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
index b39903fca4c8..0ca60d193bed 100755
--- a/tools/testing/selftests/bpf/tcp_server.py
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -35,13 +35,10 @@ MAX_PORTS = 2
serverPort = SERVER_PORT
serverSocket = None
-HostName = socket.gethostname()
-
# create passive socket
serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
-host = socket.gethostname()
-try: serverSocket.bind((host, 0))
+try: serverSocket.bind(('localhost', 0))
except socket.error as msg:
print('bind fails: ' + str(msg))
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 91420fa83b08..42c1ce988945 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -18,11 +18,13 @@
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
+#include <assert.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "bpf_rlimit.h"
#include "bpf_util.h"
+#include "test_btf.h"
#define MAX_INSNS 512
#define MAX_SUBPROGS 16
@@ -51,65 +53,19 @@ static int count_result(int err)
return err;
}
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-
-__printf(1, 2)
-static int __base_pr(const char *format, ...)
+static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
+ const char *format, va_list args)
{
- va_list args;
- int err;
-
- va_start(args, format);
- err = vfprintf(stderr, format, args);
- va_end(args);
- return err;
+ return vfprintf(stderr, format, args);
}
-#define BTF_INFO_ENC(kind, kind_flag, vlen) \
- ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
-
-#define BTF_TYPE_ENC(name, info, size_or_type) \
- (name), (info), (size_or_type)
-
-#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
- ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
-#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
- BTF_INT_ENC(encoding, bits_offset, bits)
-
-#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
- (type), (index_type), (nr_elems)
-#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
- BTF_ARRAY_ENC(type, index_type, nr_elems)
-
-#define BTF_MEMBER_ENC(name, type, bits_offset) \
- (name), (type), (bits_offset)
-#define BTF_ENUM_ENC(name, val) (name), (val)
-#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
- ((bitfield_size) << 24 | (bits_offset))
-
-#define BTF_TYPEDEF_ENC(name, type) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
-
-#define BTF_PTR_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
-
-#define BTF_CONST_ENC(type) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
-
-#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
-
-#define BTF_FUNC_PROTO_ARG_ENC(name, type) \
- (name), (type)
-
-#define BTF_FUNC_ENC(name, func_proto) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
-
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
+#define NAME_NTH(N) (0xffff0000 | N)
+#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
+#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
+
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
@@ -118,12 +74,14 @@ static struct args {
unsigned int file_test_num;
unsigned int get_info_test_num;
unsigned int info_raw_test_num;
+ unsigned int dedup_test_num;
bool raw_test;
bool file_test;
bool get_info_test;
bool pprint_test;
bool always_log;
bool info_raw_test;
+ bool dedup_test;
} args;
static char btf_log_buf[BTF_LOG_BUF_SIZE];
@@ -134,6 +92,12 @@ static struct btf_header hdr_tmpl = {
.hdr_len = sizeof(struct btf_header),
};
+/* several different mapv kinds (types) supported by pprint */
+enum pprint_mapv_kind_t {
+ PPRINT_MAPV_KIND_BASIC = 0,
+ PPRINT_MAPV_KIND_INT128,
+};
+
struct btf_raw_test {
const char *descr;
const char *str_sec;
@@ -156,6 +120,7 @@ struct btf_raw_test {
int type_off_delta;
int str_off_delta;
int str_len_delta;
+ enum pprint_mapv_kind_t mapv_kind;
};
#define BTF_STR_SEC(str) \
@@ -270,7 +235,6 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 3,
.max_entries = 4,
},
-
{
.descr = "struct test #3 Invalid member offset",
.raw_types = {
@@ -298,7 +262,664 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid member bits_offset",
},
-
+/*
+ * struct A {
+ * unsigned long long m;
+ * int n;
+ * char o;
+ * [3 bytes hole]
+ * int p[8];
+ * };
+ */
+{
+ .descr = "global data test #1",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_test1_map",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 1,
+ .value_type_id = 5,
+ .max_entries = 4,
+},
+/*
+ * struct A {
+ * unsigned long long m;
+ * int n;
+ * char o;
+ * [3 bytes hole]
+ * int p[8];
+ * };
+ * static struct A t; <- in .bss
+ */
+{
+ .descr = "global data test #2",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* .bss section */ /* [7] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
+ BTF_VAR_SECINFO_ENC(6, 0, 48),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 0,
+ .value_type_id = 7,
+ .max_entries = 1,
+},
+{
+ .descr = "global data test #3",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* static int t */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0t\0.bss",
+ .str_sec_size = sizeof("\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 3,
+ .max_entries = 1,
+},
+{
+ .descr = "global data test #4, unsupported linkage",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* static int t */
+ BTF_VAR_ENC(NAME_TBD, 1, 2), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0t\0.bss",
+ .str_sec_size = sizeof("\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Linkage not supported",
+},
+{
+ .descr = "global data test #5, invalid var type",
+ .raw_types = {
+ /* static void t */
+ BTF_VAR_ENC(NAME_TBD, 0, 0), /* [1] */
+ /* .bss section */ /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(1, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0t\0.bss",
+ .str_sec_size = sizeof("\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #6, invalid var type (fwd type)",
+ .raw_types = {
+ /* union A */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+ /* static union A t */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type",
+},
+{
+ .descr = "global data test #7, invalid var type (fwd type)",
+ .raw_types = {
+ /* union A */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
+ /* static union A t */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(1, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 2,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type",
+},
+{
+ .descr = "global data test #8, invalid var size",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* .bss section */ /* [7] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 48),
+ BTF_VAR_SECINFO_ENC(6, 0, 47),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 0,
+ .value_type_id = 7,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid size",
+},
+{
+ .descr = "global data test #9, invalid var size",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* .bss section */ /* [7] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
+ BTF_VAR_SECINFO_ENC(6, 0, 48),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 0,
+ .value_type_id = 7,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid size",
+},
+{
+ .descr = "global data test #10, invalid var size",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* .bss section */ /* [7] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 46),
+ BTF_VAR_SECINFO_ENC(6, 0, 46),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 48,
+ .key_type_id = 0,
+ .value_type_id = 7,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid size",
+},
+{
+ .descr = "global data test #11, multiple section members",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* static int u */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
+ /* .bss section */ /* [8] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+ BTF_VAR_SECINFO_ENC(6, 10, 48),
+ BTF_VAR_SECINFO_ENC(7, 58, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 62,
+ .key_type_id = 0,
+ .value_type_id = 8,
+ .max_entries = 1,
+},
+{
+ .descr = "global data test #12, invalid offset",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* static int u */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
+ /* .bss section */ /* [8] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+ BTF_VAR_SECINFO_ENC(6, 10, 48),
+ BTF_VAR_SECINFO_ENC(7, 60, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 62,
+ .key_type_id = 0,
+ .value_type_id = 8,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid offset+size",
+},
+{
+ .descr = "global data test #13, invalid offset",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* static int u */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
+ /* .bss section */ /* [8] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+ BTF_VAR_SECINFO_ENC(6, 10, 48),
+ BTF_VAR_SECINFO_ENC(7, 12, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 62,
+ .key_type_id = 0,
+ .value_type_id = 8,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid offset",
+},
+{
+ .descr = "global data test #14, invalid offset",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* unsigned long long */
+ BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
+ /* char */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1), /* [3] */
+ /* int[8] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 8), /* [4] */
+ /* struct A { */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 4), 48),
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* unsigned long long m;*/
+ BTF_MEMBER_ENC(NAME_TBD, 1, 64),/* int n; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 96),/* char o; */
+ BTF_MEMBER_ENC(NAME_TBD, 4, 128),/* int p[8] */
+ /* } */
+ /* static struct A t */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ /* static int u */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [7] */
+ /* .bss section */ /* [8] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2), 62),
+ BTF_VAR_SECINFO_ENC(7, 58, 4),
+ BTF_VAR_SECINFO_ENC(6, 10, 48),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0m\0n\0o\0p\0t\0u\0.bss",
+ .str_sec_size = sizeof("\0A\0m\0n\0o\0p\0t\0u\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 62,
+ .key_type_id = 0,
+ .value_type_id = 8,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid offset",
+},
+{
+ .descr = "global data test #15, not var kind",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(1, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0.bss",
+ .str_sec_size = sizeof("\0A\0t\0.bss"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 3,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Not a VAR kind member",
+},
+{
+ .descr = "global data test #16, invalid var referencing sec",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [2] */
+ BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */
+ /* a section */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(3, 0, 4),
+ /* a section */ /* [5] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(6, 0, 4),
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #17, invalid var referencing var",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */
+ BTF_VAR_ENC(NAME_TBD, 2, 0), /* [3] */
+ /* a section */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(3, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #18, invalid var loop",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 2, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0aaa",
+ .str_sec_size = sizeof("\0A\0t\0aaa"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #19, invalid var referencing var",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_VAR_ENC(NAME_TBD, 3, 0), /* [2] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #20, invalid ptr referencing var",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* PTR type_id=3 */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+{
+ .descr = "global data test #21, var included in struct",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct A { */ /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), sizeof(int) * 2),
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0), /* int m; */
+ BTF_MEMBER_ENC(NAME_TBD, 3, 32),/* VAR type_id=3; */
+ /* } */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid member",
+},
+{
+ .descr = "global data test #22, array of var",
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ARRAY_ENC(3, 1, 4), /* [2] */
+ BTF_VAR_ENC(NAME_TBD, 1, 0), /* [3] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0t\0s\0a\0a",
+ .str_sec_size = sizeof("\0A\0t\0s\0a\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 0,
+ .value_type_id = 4,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Invalid elem",
+},
/* Test member exceeds the size of struct.
*
* struct A {
@@ -1954,7 +2575,7 @@ static struct btf_raw_test raw_tests[] = {
/* void (*)(int a, unsigned int <bad_name_off>) */
BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
- BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2),
+ BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2),
BTF_END_RAW,
},
.str_sec = "\0a",
@@ -2704,6 +3325,99 @@ static struct btf_raw_test raw_tests[] = {
.err_str = "Invalid member offset",
},
+{
+ .descr = "128-bit int",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "int_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 128-bit int member",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct, kind_flag, 120-bit int member bitfield",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
}; /* struct btf_raw_test raw_tests[] */
static const char *get_next_str(const char *start, const char *end)
@@ -2731,11 +3445,13 @@ static void *btf_raw_create(const struct btf_header *hdr,
const char **ret_next_str)
{
const char *next_str = str, *end_str = str + str_sec_size;
+ const char **strs_idx = NULL, **tmp_strs_idx;
+ int strs_cap = 0, strs_cnt = 0, next_str_idx = 0;
unsigned int size_needed, offset;
struct btf_header *ret_hdr;
- int i, type_sec_size;
+ int i, type_sec_size, err = 0;
uint32_t *ret_types;
- void *raw_btf;
+ void *raw_btf = NULL;
type_sec_size = get_raw_sec_size(raw_types);
if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
@@ -2750,17 +3466,44 @@ static void *btf_raw_create(const struct btf_header *hdr,
memcpy(raw_btf, hdr, sizeof(*hdr));
offset = sizeof(*hdr);
+ /* Index strings */
+ while ((next_str = get_next_str(next_str, end_str))) {
+ if (strs_cnt == strs_cap) {
+ strs_cap += max(16, strs_cap / 2);
+ tmp_strs_idx = realloc(strs_idx,
+ sizeof(*strs_idx) * strs_cap);
+ if (CHECK(!tmp_strs_idx,
+ "Cannot allocate memory for strs_idx")) {
+ err = -1;
+ goto done;
+ }
+ strs_idx = tmp_strs_idx;
+ }
+ strs_idx[strs_cnt++] = next_str;
+ next_str += strlen(next_str);
+ }
+
/* Copy type section */
ret_types = raw_btf + offset;
for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) {
if (raw_types[i] == NAME_TBD) {
- next_str = get_next_str(next_str, end_str);
- if (CHECK(!next_str, "Error in getting next_str")) {
- free(raw_btf);
- return NULL;
+ if (CHECK(next_str_idx == strs_cnt,
+ "Error in getting next_str #%d",
+ next_str_idx)) {
+ err = -1;
+ goto done;
}
- ret_types[i] = next_str - str;
- next_str += strlen(next_str);
+ ret_types[i] = strs_idx[next_str_idx++] - str;
+ } else if (IS_NAME_NTH(raw_types[i])) {
+ int idx = GET_NAME_NTH_IDX(raw_types[i]);
+
+ if (CHECK(idx <= 0 || idx > strs_cnt,
+ "Error getting string #%d, strs_cnt:%d",
+ idx, strs_cnt)) {
+ err = -1;
+ goto done;
+ }
+ ret_types[i] = strs_idx[idx-1] - str;
} else {
ret_types[i] = raw_types[i];
}
@@ -2777,8 +3520,17 @@ static void *btf_raw_create(const struct btf_header *hdr,
*btf_size = size_needed;
if (ret_next_str)
- *ret_next_str = next_str;
+ *ret_next_str =
+ next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
+done:
+ if (err) {
+ if (raw_btf)
+ free(raw_btf);
+ if (strs_idx)
+ free(strs_idx);
+ return NULL;
+ }
return raw_btf;
}
@@ -3525,8 +4277,19 @@ struct pprint_mapv {
} aenum;
uint32_t ui32b;
uint32_t bits2c:2;
+ uint8_t si8_4[2][2];
};
+#ifdef __SIZEOF_INT128__
+struct pprint_mapv_int128 {
+ __int128 si128a;
+ __int128 si128b;
+ unsigned __int128 bits3:3;
+ unsigned __int128 bits80:80;
+ unsigned __int128 ui128;
+};
+#endif
+
static struct btf_raw_test pprint_test_template[] = {
{
.raw_types = {
@@ -3567,7 +4330,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
@@ -3578,9 +4341,12 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
+ BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */
+ BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3629,7 +4395,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3640,9 +4406,12 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
+ BTF_MEMBER_ENC(NAME_TBD, 17, 264), /* si8_4 */
+ BTF_TYPE_ARRAY_ENC(18, 1, 2), /* [17] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [18] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3693,7 +4462,7 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_ENUM_ENC(NAME_TBD, 2),
BTF_ENUM_ENC(NAME_TBD, 3),
/* struct pprint_mapv */ /* [16] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 11), 40),
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3704,13 +4473,16 @@ static struct btf_raw_test pprint_test_template[] = {
BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
+ BTF_MEMBER_ENC(NAME_TBD, 20, BTF_MEMBER_OFFSET(0, 264)), /* si8_4 */
/* typedef unsigned int ___int */ /* [17] */
BTF_TYPEDEF_ENC(NAME_TBD, 18),
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
+ BTF_TYPE_ARRAY_ENC(21, 1, 2), /* [20] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 2), /* [21] */
BTF_END_RAW,
},
- BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int\0si8_4"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
@@ -3718,6 +4490,35 @@ static struct btf_raw_test pprint_test_template[] = {
.max_entries = 128 * 1024,
},
+#ifdef __SIZEOF_INT128__
+{
+ /* test int128 */
+ .raw_types = {
+ /* unsigned int */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* __int128 */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16),
+ /* unsigned __int128 */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16),
+ /* struct pprint_mapv_int128 */ /* [4] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv_int128),
+ .key_type_id = 1,
+ .value_type_id = 4,
+ .max_entries = 128 * 1024,
+ .mapv_kind = PPRINT_MAPV_KIND_INT128,
+},
+#endif
+
};
static struct btf_pprint_test_meta {
@@ -3784,24 +4585,114 @@ static struct btf_pprint_test_meta {
};
+static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind)
+{
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC)
+ return sizeof(struct pprint_mapv);
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128)
+ return sizeof(struct pprint_mapv_int128);
+#endif
-static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
+ assert(0);
+}
+
+static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind,
+ void *mapv, uint32_t i,
int num_cpus, int rounded_value_size)
{
int cpu;
- for (cpu = 0; cpu < num_cpus; cpu++) {
- v->ui32 = i + cpu;
- v->si32 = -i;
- v->unused_bits2a = 3;
- v->bits28 = i;
- v->unused_bits2b = 3;
- v->ui64 = i;
- v->aenum = i & 0x03;
- v->ui32b = 4;
- v->bits2c = 1;
- v = (void *)v + rounded_value_size;
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->ui32 = i + cpu;
+ v->si32 = -i;
+ v->unused_bits2a = 3;
+ v->bits28 = i;
+ v->unused_bits2b = 3;
+ v->ui64 = i;
+ v->aenum = i & 0x03;
+ v->ui32b = 4;
+ v->bits2c = 1;
+ v->si8_4[0][0] = (cpu + i) & 0xff;
+ v->si8_4[0][1] = (cpu + i + 1) & 0xff;
+ v->si8_4[1][0] = (cpu + i + 2) & 0xff;
+ v->si8_4[1][1] = (cpu + i + 3) & 0xff;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ for (cpu = 0; cpu < num_cpus; cpu++) {
+ v->si128a = i;
+ v->si128b = -i;
+ v->bits3 = i & 0x07;
+ v->bits80 = (((unsigned __int128)1) << 64) + i;
+ v->ui128 = (((unsigned __int128)2) << 64) + i;
+ v = (void *)v + rounded_value_size;
+ }
+ }
+#endif
+}
+
+ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind,
+ char *expected_line, ssize_t line_size,
+ bool percpu_map, unsigned int next_key,
+ int cpu, void *mapv)
+{
+ ssize_t nexpected_line = -1;
+
+ if (mapv_kind == PPRINT_MAPV_KIND_BASIC) {
+ struct pprint_mapv *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
+ "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
+ "%u,0x%x,[[%d,%d],[%d,%d]]}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ v->ui32, v->si32,
+ v->unused_bits2a,
+ v->bits28,
+ v->unused_bits2b,
+ v->ui64,
+ v->ui8a[0], v->ui8a[1],
+ v->ui8a[2], v->ui8a[3],
+ v->ui8a[4], v->ui8a[5],
+ v->ui8a[6], v->ui8a[7],
+ pprint_enum_str[v->aenum],
+ v->ui32b,
+ v->bits2c,
+ v->si8_4[0][0], v->si8_4[0][1],
+ v->si8_4[1][0], v->si8_4[1][1]);
}
+
+#ifdef __SIZEOF_INT128__
+ if (mapv_kind == PPRINT_MAPV_KIND_INT128) {
+ struct pprint_mapv_int128 *v = mapv;
+
+ nexpected_line = snprintf(expected_line, line_size,
+ "%s%u: {0x%lx,0x%lx,0x%lx,"
+ "0x%lx%016lx,0x%lx%016lx}\n",
+ percpu_map ? "\tcpu" : "",
+ percpu_map ? cpu : next_key,
+ (uint64_t)v->si128a,
+ (uint64_t)v->si128b,
+ (uint64_t)v->bits3,
+ (uint64_t)(v->bits80 >> 64),
+ (uint64_t)v->bits80,
+ (uint64_t)(v->ui128 >> 64),
+ (uint64_t)v->ui128);
+ }
+#endif
+
+ return nexpected_line;
}
static int check_line(const char *expected_line, int nexpected_line,
@@ -3825,10 +4716,10 @@ static int check_line(const char *expected_line, int nexpected_line,
static int do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
+ enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
struct bpf_create_map_attr create_attr = {};
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
- struct pprint_mapv *mapv = NULL;
unsigned int key, nr_read_elems;
int map_fd = -1, btf_fd = -1;
unsigned int raw_btf_size;
@@ -3837,6 +4728,7 @@ static int do_test_pprint(int test_num)
char pin_path[255];
size_t line_len = 0;
char *line = NULL;
+ void *mapv = NULL;
uint8_t *raw_btf;
ssize_t nread;
@@ -3889,7 +4781,7 @@ static int do_test_pprint(int test_num)
percpu_map = test->percpu_map;
num_cpus = percpu_map ? bpf_num_possible_cpus() : 1;
- rounded_value_size = round_up(sizeof(struct pprint_mapv), 8);
+ rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);
mapv = calloc(num_cpus, rounded_value_size);
if (CHECK(!mapv, "mapv allocation failure")) {
err = -1;
@@ -3897,7 +4789,7 @@ static int do_test_pprint(int test_num)
}
for (key = 0; key < test->max_entries; key++) {
- set_pprint_mapv(mapv, key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);
bpf_map_update_elem(map_fd, &key, mapv, 0);
}
@@ -3921,13 +4813,13 @@ static int do_test_pprint(int test_num)
ordered_map = test->ordered_map;
lossless_map = test->lossless_map;
do {
- struct pprint_mapv *cmapv;
ssize_t nexpected_line;
unsigned int next_key;
+ void *cmapv;
int cpu;
next_key = ordered_map ? nr_read_elems : atoi(line);
- set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size);
+ set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);
cmapv = mapv;
for (cpu = 0; cpu < num_cpus; cpu++) {
@@ -3960,31 +4852,16 @@ static int do_test_pprint(int test_num)
break;
}
- nexpected_line = snprintf(expected_line, sizeof(expected_line),
- "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
- "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
- "%u,0x%x}\n",
- percpu_map ? "\tcpu" : "",
- percpu_map ? cpu : next_key,
- cmapv->ui32, cmapv->si32,
- cmapv->unused_bits2a,
- cmapv->bits28,
- cmapv->unused_bits2b,
- cmapv->ui64,
- cmapv->ui8a[0], cmapv->ui8a[1],
- cmapv->ui8a[2], cmapv->ui8a[3],
- cmapv->ui8a[4], cmapv->ui8a[5],
- cmapv->ui8a[6], cmapv->ui8a[7],
- pprint_enum_str[cmapv->aenum],
- cmapv->ui32b,
- cmapv->bits2c);
-
+ nexpected_line = get_pprint_expected_line(mapv_kind, expected_line,
+ sizeof(expected_line),
+ percpu_map, next_key,
+ cpu, cmapv);
err = check_line(expected_line, nexpected_line,
sizeof(expected_line), line);
if (err == -1)
goto done;
- cmapv = (void *)cmapv + rounded_value_size;
+ cmapv = cmapv + rounded_value_size;
}
if (percpu_map) {
@@ -4080,6 +4957,10 @@ static struct prog_info_raw_test {
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
+ __u32 dead_code_cnt;
+ __u32 dead_code_mask;
+ __u32 dead_func_cnt;
+ __u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
@@ -4506,6 +5387,369 @@ static struct prog_info_raw_test {
.expected_prog_load_failure = true,
},
+{
+ .descr = "line_info (dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 1,
+ .dead_code_mask = 0x01,
+},
+
+{
+ .descr = "line_info (dead end)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x28,
+},
+
+{
+ .descr = "line_info (dead code + subprog + func_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {14, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 9,
+ .dead_code_mask = 0x3fe,
+},
+
+{
+ .descr = "line_info (dead subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead last subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
+ "\0return 0;\0/* dead */\0/* dead */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {5, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .dead_code_cnt = 2,
+ .dead_code_mask = 0x18,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
+ "\0return 0;\0return 0;\0return 0;"
+ "\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
+ "\0return b + 1;\0return b + 1;\0return b + 1;"),
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {7, 3}, {10, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 5,
+ .dead_code_mask = 0x1e2,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead subprog + dead start w/ move)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
+ "\0return 0;\0return 0;\0/* dead */\0/* dead */"
+ "\0/* dead */\0return bla + 1;\0return bla + 1;"
+ "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_CALL_REL(3),
+ BPF_CALL_REL(5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 3,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {6, 3}, {9, 5} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .dead_code_cnt = 3,
+ .dead_code_mask = 0x70,
+ .dead_func_cnt = 1,
+ .dead_func_mask = 0x2,
+},
+
+{
+ .descr = "line_info (dead end + subprog start w/ no linfo)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
+ BPF_CALL_REL(3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 3}, {6, 4}, },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
};
static size_t probe_prog_length(const struct bpf_insn *fp)
@@ -4565,6 +5809,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
+ __u32 nr_func_info;
int err;
/* get necessary lens */
@@ -4574,7 +5819,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ nr_func_info = test->func_info_cnt - test->dead_func_cnt;
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
@@ -4595,7 +5841,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
- info.nr_func_info = test->func_info_cnt;
+ info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
@@ -4604,7 +5850,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
err = -1;
goto done;
}
- if (CHECK(info.nr_func_info != test->func_info_cnt,
+ if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
@@ -4618,7 +5864,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
}
finfo = func_info;
- for (i = 0; i < test->func_info_cnt; i++) {
+ for (i = 0; i < nr_func_info; i++) {
+ if (test->dead_func_mask & (1 << i))
+ continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
@@ -4647,6 +5895,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
+ __u32 dead_insns;
int err;
jited_cnt = cnt;
@@ -4655,7 +5904,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
- nr_jited_ksyms = test->func_info_cnt;
+ nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
@@ -4757,12 +6006,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
goto done;
}
+ dead_insns = 0;
+ while (test->dead_code_mask & (1 << dead_insns))
+ dead_insns++;
+
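	/*
	 * Illustration: each set bit k in dead_code_mask marks the k-th
	 * original line_info entry as attached to verifier-dead code, so it
	 * is expected to be missing from the kernel-returned line_info.
	 * For the test above with dead_code_mask == 0x3fe, entries 1..9 are
	 * dropped, so the surviving entry i == 1 must be compared against
	 * original entry 1 + 9 == 10; dead_insns accumulates that skip
	 * count as the loop below advances.
	 */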
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
- expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+ while (test->dead_code_mask & (1 << (i + dead_insns)))
+ dead_insns++;
+
+ expected_linfo = patched_linfo +
+ ((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
@@ -4920,7 +6177,9 @@ static int do_test_info_raw(unsigned int test_num)
if (err)
goto done;
- err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+ err = test_get_linfo(test, patched_linfo,
+ attr.line_info_cnt - test->dead_code_cnt,
+ prog_fd);
if (err)
goto done;
@@ -4956,20 +6215,648 @@ static int test_info_raw(void)
return err;
}
+struct btf_raw_data {
+ __u32 raw_types[MAX_NR_RAW_U32];
+ const char *str_sec;
+ __u32 str_sec_size;
+};
+
+struct btf_dedup_test {
+ const char *descr;
+ struct btf_raw_data input;
+ struct btf_raw_data expect;
+ struct btf_dedup_opts opts;
+};
+
+const struct btf_dedup_test dedup_tests[] = {
+
+{
+ .descr = "dedup: unused strings filtering",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: strings deduplication",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int\0int\0long int\0int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0long int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct example #1",
+ /*
+ * struct s {
+ * struct s *next;
+ * const int *a;
+ * int b[16];
+ * int c;
+ * }
+ */
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+
+ /* full copy of the above */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
+ BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
+ BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
+ BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
+ BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
+ BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
+ BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
+ BTF_PTR_ENC(9), /* [10] */
+ BTF_PTR_ENC(12), /* [11] */
+ BTF_CONST_ENC(7), /* [12] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
+ },
+ .expect = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* int[16] */
+ BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
+ /* struct s { */
+ BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
+ BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
+ BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
+ BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
+ /* ptr -> [3] struct s */
+ BTF_PTR_ENC(3), /* [4] */
+ /* ptr -> [6] const int */
+ BTF_PTR_ENC(6), /* [5] */
+ /* const -> [1] int */
+ BTF_CONST_ENC(1), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: struct <-> fwd resolution w/ hash collision",
+ /*
+ * // CU 1:
+ * struct x;
+ * struct s {
+ * struct x *x;
+ * };
+ * // CU 2:
+ * struct x {};
+ * struct s {
+ * struct x *x;
+ * };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_FWD_ENC(NAME_TBD, 0 /* struct fwd */), /* [1] fwd x */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+ /* CU 2 */
+ BTF_STRUCT_ENC(NAME_TBD, 0, 0), /* [4] struct x */
+ BTF_PTR_ENC(4), /* [5] ptr -> [4] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [6] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 5, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0x\0s\0x\0x\0s\0x\0"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_PTR_ENC(3), /* [1] ptr -> [3] */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [2] struct s */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_STRUCT_ENC(NAME_NTH(2), 0, 0), /* [3] struct x */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1, /* force hash collisions */
+ },
+},
+{
+ .descr = "dedup: void equiv check",
+ /*
+ * // CU 1:
+ * struct s {
+ * struct {} *x;
+ * };
+ * // CU 2:
+ * struct s {
+ * int *x;
+ * };
+ */
+ .input = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ /* CU 2 */
+ BTF_PTR_ENC(0), /* [4] ptr -> void */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .expect = {
+ .raw_types = {
+ /* CU 1 */
+ BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
+ BTF_PTR_ENC(1), /* [2] ptr -> [1] */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
+ /* CU 2 */
+ BTF_PTR_ENC(0), /* [4] ptr -> void */
+ BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
+ BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0s\0x"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1, /* force hash collisions */
+ },
+},
+{
+ .descr = "dedup: all possible kinds (no duplicates)",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
+ BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
+ BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
+ BTF_PTR_ENC(0), /* [8] ptr */
+ BTF_CONST_ENC(8), /* [9] const */
+ BTF_VOLATILE_ENC(8), /* [10] volatile */
+ BTF_RESTRICT_ENC(8), /* [11] restrict */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
+ BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: no int duplicates",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
+ /* different name */
+ BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
+ /* different encoding */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
+ /* different bit offset */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
+ /* different bit size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
+ /* different byte size */
+ BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0some other int"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: enum fwd resolution",
+ .input = {
+ .raw_types = {
+ /* [1] fwd enum 'e1' before full enum */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [2] full enum 'e1' after fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 123),
+ /* [3] full enum 'e2' before fwd */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(4), 456),
+ /* [4] fwd enum 'e2' after full enum */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+ /* [5] incompatible fwd enum with different size */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+ /* [6] incompatible full enum with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] full enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 123),
+ /* [2] full enum 'e2' */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(4), 456),
+ /* [3] incompatible fwd enum with different size */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+ /* [4] incompatible full enum with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ },
+},
+{
+ .descr = "dedup: datasec and vars pass-through",
+ .input = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* static int t */
+ BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ /* int, referenced from [5] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
+ /* another static int t */
+ BTF_VAR_ENC(NAME_NTH(2), 4, 0), /* [5] */
+ /* another .bss section */ /* [6] */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(5, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0.bss\0t"),
+ },
+ .expect = {
+ .raw_types = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* static int t */
+ BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [2] */
+ /* .bss section */ /* [3] */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ /* another static int t */
+ BTF_VAR_ENC(NAME_NTH(2), 1, 0), /* [4] */
+ /* another .bss section */ /* [5] */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(4, 0, 4),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0.bss\0t"),
+ },
+ .opts = {
+ .dont_resolve_fwds = false,
+ .dedup_table_size = 1
+ },
+},
+
+};
+
+static int btf_type_size(const struct btf_type *t)
+{
+ int base_size = sizeof(struct btf_type);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u16 kind = BTF_INFO_KIND(t->info);
+
+ switch (kind) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return base_size;
+ case BTF_KIND_INT:
+ return base_size + sizeof(__u32);
+ case BTF_KIND_ENUM:
+ return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ARRAY:
+ return base_size + sizeof(struct btf_array);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return base_size + vlen * sizeof(struct btf_member);
+ case BTF_KIND_FUNC_PROTO:
+ return base_size + vlen * sizeof(struct btf_param);
+ case BTF_KIND_VAR:
+ return base_size + sizeof(struct btf_var);
+ case BTF_KIND_DATASEC:
+ return base_size + vlen * sizeof(struct btf_var_secinfo);
+ default:
+ fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
+ return -EINVAL;
+ }
+}
+
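/*
 * Worked example (illustrative): for the "struct s" used in the dedup tests
 * above (BTF_KIND_STRUCT with vlen == 4), btf_type_size() returns
 * sizeof(struct btf_type) + 4 * sizeof(struct btf_member), i.e.
 * 12 + 4 * 12 = 60 bytes.
 */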
+static void dump_btf_strings(const char *strs, __u32 len)
+{
+ const char *cur = strs;
+ int i = 0;
+
+ while (cur < strs + len) {
+ fprintf(stderr, "string #%d: '%s'\n", i, cur);
+ cur += strlen(cur) + 1;
+ i++;
+ }
+}
+
+static int do_test_dedup(unsigned int test_num)
+{
+ const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
+ __u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
+ const struct btf_header *test_hdr, *expect_hdr;
+ struct btf *test_btf = NULL, *expect_btf = NULL;
+ const void *test_btf_data, *expect_btf_data;
+ const char *ret_test_next_str, *ret_expect_next_str;
+ const char *test_strs, *expect_strs;
+ const char *test_str_cur, *test_str_end;
+ const char *expect_str_cur, *expect_str_end;
+ unsigned int raw_btf_size;
+ void *raw_btf;
+ int err = 0, i;
+
+ fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
+ test->input.str_sec, test->input.str_sec_size,
+ &raw_btf_size, &ret_test_next_str);
+ if (!raw_btf)
+ return -1;
+ test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
+ PTR_ERR(test_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types,
+ test->expect.str_sec,
+ test->expect.str_sec_size,
+ &raw_btf_size, &ret_expect_next_str);
+ if (!raw_btf)
+ return -1;
+ expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
+ free(raw_btf);
+ if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
+ PTR_ERR(expect_btf))) {
+ err = -1;
+ goto done;
+ }
+
+ err = btf__dedup(test_btf, NULL, &test->opts);
+ if (CHECK(err, "btf_dedup failed errno:%d", err)) {
+ err = -1;
+ goto done;
+ }
+
+ test_btf_data = btf__get_raw_data(test_btf, &test_btf_size);
+ expect_btf_data = btf__get_raw_data(expect_btf, &expect_btf_size);
+ if (CHECK(test_btf_size != expect_btf_size,
+ "test_btf_size:%u != expect_btf_size:%u",
+ test_btf_size, expect_btf_size)) {
+ err = -1;
+ goto done;
+ }
+
+ test_hdr = test_btf_data;
+ test_strs = test_btf_data + sizeof(*test_hdr) + test_hdr->str_off;
+ expect_hdr = expect_btf_data;
+ expect_strs = expect_btf_data + sizeof(*test_hdr) + expect_hdr->str_off;
+ if (CHECK(test_hdr->str_len != expect_hdr->str_len,
+ "test_hdr->str_len:%u != expect_hdr->str_len:%u",
+ test_hdr->str_len, expect_hdr->str_len)) {
+ fprintf(stderr, "\ntest strings:\n");
+ dump_btf_strings(test_strs, test_hdr->str_len);
+ fprintf(stderr, "\nexpected strings:\n");
+ dump_btf_strings(expect_strs, expect_hdr->str_len);
+ err = -1;
+ goto done;
+ }
+
+ test_str_cur = test_strs;
+ test_str_end = test_strs + test_hdr->str_len;
+ expect_str_cur = expect_strs;
+ expect_str_end = expect_strs + expect_hdr->str_len;
+ while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) {
+ size_t test_len, expect_len;
+
+ test_len = strlen(test_str_cur);
+ expect_len = strlen(expect_str_cur);
+ if (CHECK(test_len != expect_len,
+ "test_len:%zu != expect_len:%zu "
+ "(test_str:%s, expect_str:%s)",
+ test_len, expect_len, test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(strcmp(test_str_cur, expect_str_cur),
+ "test_str:%s != expect_str:%s",
+ test_str_cur, expect_str_cur)) {
+ err = -1;
+ goto done;
+ }
+ test_str_cur += test_len + 1;
+ expect_str_cur += expect_len + 1;
+ }
+ if (CHECK(test_str_cur != test_str_end,
+ "test_str_cur:%p != test_str_end:%p",
+ test_str_cur, test_str_end)) {
+ err = -1;
+ goto done;
+ }
+
+ test_nr_types = btf__get_nr_types(test_btf);
+ expect_nr_types = btf__get_nr_types(expect_btf);
+ if (CHECK(test_nr_types != expect_nr_types,
+ "test_nr_types:%u != expect_nr_types:%u",
+ test_nr_types, expect_nr_types)) {
+ err = -1;
+ goto done;
+ }
+
+ for (i = 1; i <= test_nr_types; i++) {
+ const struct btf_type *test_type, *expect_type;
+ int test_size, expect_size;
+
+ test_type = btf__type_by_id(test_btf, i);
+ expect_type = btf__type_by_id(expect_btf, i);
+ test_size = btf_type_size(test_type);
+ expect_size = btf_type_size(expect_type);
+
+ if (CHECK(test_size != expect_size,
+ "type #%d: test_size:%d != expect_size:%u",
+ i, test_size, expect_size)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(memcmp((void *)test_type,
+ (void *)expect_type,
+ test_size),
+ "type #%d: contents differ", i)) {
+ err = -1;
+ goto done;
+ }
+ }
+
+done:
+ if (!err)
+ fprintf(stderr, "OK");
+ if (!IS_ERR(test_btf))
+ btf__free(test_btf);
+ if (!IS_ERR(expect_btf))
+ btf__free(expect_btf);
+
+ return err;
+}
+
+static int test_dedup(void)
+{
+ unsigned int i;
+ int err = 0;
+
+ if (args.dedup_test_num)
+ return count_result(do_test_dedup(args.dedup_test_num));
+
+ for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
+ err |= count_result(do_test_dedup(i));
+
+ return err;
+}
+
static void usage(const char *cmd)
{
fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
"\t[-g btf_get_info_test_num (1 - %zu)] |\n"
"\t[-f btf_file_test_num (1 - %zu)] |\n"
"\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
- "\t[-p (pretty print test)]]\n",
+ "\t[-p (pretty print test)] |\n"
+ "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests));
+ ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
+ ARRAY_SIZE(dedup_tests));
}
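/*
 * Illustrative invocations (assuming the built selftest binary is named
 * test_btf): "./test_btf -d 2" runs only BTF dedup test #2, while
 * "./test_btf" with no options runs every test group, dedup included.
 */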
static int parse_args(int argc, char **argv)
{
- const char *optstr = "lpk:f:r:g:";
+ const char *optstr = "hlpk:f:r:g:d:";
int opt;
while ((opt = getopt(argc, argv, optstr)) != -1) {
@@ -4996,12 +6883,16 @@ static int parse_args(int argc, char **argv)
args.info_raw_test_num = atoi(optarg);
args.info_raw_test = true;
break;
+ case 'd':
+ args.dedup_test_num = atoi(optarg);
+ args.dedup_test = true;
+ break;
case 'h':
usage(argv[0]);
exit(0);
default:
- usage(argv[0]);
- return -1;
+ usage(argv[0]);
+ return -1;
}
}
@@ -5037,6 +6928,14 @@ static int parse_args(int argc, char **argv)
return -1;
}
+ if (args.dedup_test_num &&
+ (args.dedup_test_num < 1 ||
+ args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
+ fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
+ ARRAY_SIZE(dedup_tests));
+ return -1;
+ }
+
return 0;
}
@@ -5055,7 +6954,7 @@ int main(int argc, char **argv)
return err;
if (args.always_log)
- libbpf_set_print(__base_pr, __base_pr, __base_pr);
+ libbpf_set_print(__base_pr);
if (args.raw_test)
err |= test_raw();
@@ -5072,14 +6971,18 @@ int main(int argc, char **argv)
if (args.info_raw_test)
err |= test_info_raw();
+ if (args.dedup_test)
+ err |= test_dedup();
+
if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test || args.info_raw_test)
+ args.pprint_test || args.info_raw_test || args.dedup_test)
goto done;
err |= test_raw();
err |= test_get_info();
err |= test_file();
err |= test_info_raw();
+ err |= test_dedup();
done:
print_summary();
diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h
new file mode 100644
index 000000000000..2023725f1962
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_btf.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef _TEST_BTF_H
+#define _TEST_BTF_H
+
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+
+#define BTF_TYPE_ENC(name, info, size_or_type) \
+ (name), (info), (size_or_type)
+
+#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
+ ((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
+#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
+ BTF_INT_ENC(encoding, bits_offset, bits)
+
+#define BTF_FWD_ENC(name, kind_flag) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
+
+#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
+ (type), (index_type), (nr_elems)
+#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
+ BTF_ARRAY_ENC(type, index_type, nr_elems)
+
+#define BTF_STRUCT_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
+
+#define BTF_UNION_ENC(name, nr_elems, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
+
+#define BTF_VAR_ENC(name, type, linkage) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), type), (linkage)
+#define BTF_VAR_SECINFO_ENC(type, offset, size) \
+ (type), (offset), (size)
+
+#define BTF_MEMBER_ENC(name, type, bits_offset) \
+ (name), (type), (bits_offset)
+#define BTF_ENUM_ENC(name, val) (name), (val)
+#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
+ ((bitfield_size) << 24 | (bits_offset))
+
+#define BTF_TYPEDEF_ENC(name, type) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
+
+#define BTF_PTR_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
+
+#define BTF_CONST_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
+
+#define BTF_VOLATILE_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
+
+#define BTF_RESTRICT_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
+
+#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
+
+#define BTF_FUNC_PROTO_ARG_ENC(name, type) \
+ (name), (type)
+
+#define BTF_FUNC_ENC(name, func_proto) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
+
+#endif /* _TEST_BTF_H */
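For orientation, a minimal sketch of how these macros flatten into the raw __u32 stream consumed by the test harness (illustrative only; NAME_TBD/NAME_NTH are name placeholders the harness resolves to string-section offsets):

/* BTF_TYPE_INT_ENC(name, BTF_INT_SIGNED, 0, 32, 4) emits four __u32 words:
 *   name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), 4 (byte size),
 *   BTF_INT_ENC(BTF_INT_SIGNED, 0, 32) (the INT-specific word).
 * A struct with two members becomes one btf_type record followed by two
 * btf_member triples, each encoded as (name), (type), (bits_offset).
 */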
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 76e4993b7c16..d850fb9076b5 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c
index 12b784afba31..01f0c634d548 100644
--- a/tools/testing/selftests/bpf/test_flow_dissector.c
+++ b/tools/testing/selftests/bpf/test_flow_dissector.c
@@ -16,7 +16,6 @@
#include <errno.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
-#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <netinet/ip.h>
#include <netinet/in.h>
@@ -25,7 +24,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/test_iptunnel_common.h b/tools/testing/selftests/bpf/test_iptunnel_common.h
index e4cd252a1b20..1d5ba839ddea 100644
--- a/tools/testing/selftests/bpf/test_iptunnel_common.h
+++ b/tools/testing/selftests/bpf/test_iptunnel_common.h
@@ -1,8 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#ifndef _TEST_IPTNL_COMMON_H
#define _TEST_IPTNL_COMMON_H
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
index 8fcd1c076add..9e9db202d218 100644
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -11,6 +11,8 @@ static const char *__doc__ =
#include <bpf/libbpf.h>
#include <getopt.h>
+#include "bpf_rlimit.h"
+
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
{"debug", no_argument, NULL, 'D' },
@@ -34,23 +36,16 @@ static void usage(char *argv[])
printf("\n");
}
-#define DEFINE_PRINT_FN(name, enabled) \
-static int libbpf_##name(const char *fmt, ...) \
-{ \
- va_list args; \
- int ret; \
- \
- va_start(args, fmt); \
- if (enabled) { \
- fprintf(stderr, "[" #name "] "); \
- ret = vfprintf(stderr, fmt, args); \
- } \
- va_end(args); \
- return ret; \
+static bool debug = 0;
+static int libbpf_debug_print(enum libbpf_print_level level,
+ const char *fmt, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !debug)
+ return 0;
+
+ fprintf(stderr, "[%d] ", level);
+ return vfprintf(stderr, fmt, args);
}
-DEFINE_PRINT_FN(warning, 1)
-DEFINE_PRINT_FN(info, 1)
-DEFINE_PRINT_FN(debug, 1)
#define EXIT_FAIL_LIBBPF EXIT_FAILURE
#define EXIT_FAIL_OPTION 2
@@ -74,7 +69,7 @@ int test_walk_maps(struct bpf_object *obj, bool verbose)
struct bpf_map *map;
int cnt = 0;
- bpf_map__for_each(map, obj) {
+ bpf_object__for_each_map(map, obj) {
cnt++;
if (verbose)
printf("Map (count:%d) name: %s\n", cnt,
@@ -120,15 +115,14 @@ int main(int argc, char **argv)
int longindex = 0;
int opt;
- libbpf_set_print(libbpf_warning, libbpf_info, NULL);
+ libbpf_set_print(libbpf_debug_print);
/* Parse commands line args */
while ((opt = getopt_long(argc, argv, "hDq",
long_options, &longindex)) != -1) {
switch (opt) {
case 'D':
- libbpf_set_print(libbpf_warning, libbpf_info,
- libbpf_debug);
+ debug = 1;
break;
case 'q': /* Use in scripting mode */
verbose = 0;
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index 02d7c871862a..006be3963977 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -573,13 +573,13 @@ static void test_lpm_get_next_key(void)
/* add one more element (total two) */
key_p->prefixlen = 24;
- inet_pton(AF_INET, "192.168.0.0", key_p->data);
+ inet_pton(AF_INET, "192.168.128.0", key_p->data);
assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
memset(key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
- key_p->data[1] == 168 && key_p->data[2] == 0);
+ key_p->data[1] == 168 && key_p->data[2] == 128);
memset(next_key_p, 0, key_size);
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
@@ -592,7 +592,7 @@ static void test_lpm_get_next_key(void)
/* Add one more element (total three) */
key_p->prefixlen = 24;
- inet_pton(AF_INET, "192.168.128.0", key_p->data);
+ inet_pton(AF_INET, "192.168.0.0", key_p->data);
assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
memset(key_p, 0, key_size);
@@ -643,6 +643,41 @@ static void test_lpm_get_next_key(void)
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
errno == ENOENT);
+ /* Add one more element (total five) */
+ key_p->prefixlen = 28;
+ inet_pton(AF_INET, "192.168.1.128", key_p->data);
+ assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0);
+
+ memset(key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0);
+ assert(key_p->prefixlen == 24 && key_p->data[0] == 192 &&
+ key_p->data[1] == 168 && key_p->data[2] == 0);
+
+ memset(next_key_p, 0, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 1 &&
+ next_key_p->data[3] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 1);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168 && next_key_p->data[2] == 128);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0);
+ assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 &&
+ next_key_p->data[1] == 168);
+
+ memcpy(key_p, next_key_p, key_size);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
+ errno == ENOENT);
+
/* no exact matching key should return the first one in post order */
key_p->prefixlen = 22;
inet_pton(AF_INET, "192.168.1.0", key_p->data);
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 781c7de343be..6a5349f9eb14 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#define _GNU_SOURCE
#include <stdio.h>
@@ -18,9 +15,11 @@
#include <sys/wait.h>
#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#include "bpf_util.h"
#include "bpf_rlimit.h"
+#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET (128)
#define PERCPU_FREE_TARGET (4)
@@ -40,6 +39,68 @@ static int create_map(int map_type, int map_flags, unsigned int size)
return map_fd;
}
+static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key,
+ void *value)
+{
+ struct bpf_load_program_attr prog;
+ struct bpf_create_map_attr map;
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, fd),
+ BPF_LD_IMM64(BPF_REG_3, key),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+ __u8 data[64] = {};
+ int mfd, pfd, ret, zero = 0;
+ __u32 retval = 0;
+
+ memset(&map, 0, sizeof(map));
+ map.map_type = BPF_MAP_TYPE_ARRAY;
+ map.key_size = sizeof(int);
+ map.value_size = sizeof(unsigned long long);
+ map.max_entries = 1;
+
+ mfd = bpf_create_map_xattr(&map);
+ if (mfd < 0)
+ return -1;
+
+ insns[0].imm = mfd;
+
+ memset(&prog, 0, sizeof(prog));
+ prog.prog_type = BPF_PROG_TYPE_SCHED_CLS;
+ prog.insns = insns;
+ prog.insns_cnt = ARRAY_SIZE(insns);
+ prog.license = "GPL";
+
+ pfd = bpf_load_program_xattr(&prog, NULL, 0);
+ if (pfd < 0) {
+ close(mfd);
+ return -1;
+ }
+
+ ret = bpf_prog_test_run(pfd, 1, data, sizeof(data),
+ NULL, NULL, &retval, NULL);
+ if (ret < 0 || retval != 42) {
+ ret = -1;
+ } else {
+ assert(!bpf_map_lookup_elem(mfd, &zero, value));
+ ret = 0;
+ }
+ close(pfd);
+ close(mfd);
+ return ret;
+}
+
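/*
 * Sketch of intended use (illustrative): a bpf_map_lookup_elem() syscall
 * does not set the LRU reference bit, so the key it touches remains an
 * eviction candidate.  Looking the key up from BPF program context via the
 * helper above does set the bit:
 *
 *	key = 1;
 *	assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
 *	key=1 is now referenced and survives the next LRU eviction round.
 */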
static int map_subset(int map0, int map1)
{
unsigned long long next_key = 0;
@@ -87,7 +148,7 @@ static int sched_next_online(int pid, int *next_to_try)
return ret;
}
-/* Size of the LRU amp is 2
+/* Size of the LRU map is 2
* Add key=1 (+1 key)
* Add key=2 (+1 key)
* Lookup Key=1
@@ -157,7 +218,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
* stop LRU from removing key=1
*/
key = 1;
- assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(value[0] == 1234);
key = 3;
@@ -167,7 +228,8 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -221,7 +283,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
/* Lookup 1 to tgt_free/2 */
end_key = 1 + batch_size;
for (key = 1; key < end_key; key++) {
- assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
@@ -322,10 +384,11 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
end_key = 1 + batch_size;
value[0] = 4321;
for (key = 1; key < end_key; key++) {
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
- assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(value[0] == 4321);
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
@@ -404,7 +467,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
/* Lookup key 1 to tgt_free*3/2 */
end_key = tgt_free + batch_size;
for (key = 1; key < end_key; key++) {
- assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
@@ -463,7 +526,7 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
for (key = 1; key <= tgt_free; key++) {
- assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
@@ -494,16 +557,16 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
unsigned long long key, value[nr_cpus];
/* Ensure the last key inserted by previous CPU can be found */
- assert(!bpf_map_lookup_elem(map_fd, &last_key, value));
-
+ assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value));
value[0] = 1234;
key = last_key + 1;
assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
- assert(!bpf_map_lookup_elem(map_fd, &key, value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
/* Cannot find the last key because it was removed by LRU */
- assert(bpf_map_lookup_elem(map_fd, &last_key, value));
+ assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 &&
+ errno == ENOENT);
}
/* Test map with only one element */
@@ -590,8 +653,8 @@ static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
/* Make ref bit sticky for key: [1, tgt_free] */
for (stable_key = 1; stable_key <= tgt_free; stable_key++) {
/* Mark the ref bit */
- assert(!bpf_map_lookup_elem(lru_map_fd, &stable_key,
- value));
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd,
+ stable_key, value));
}
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -612,6 +675,198 @@ static void test_lru_sanity6(int map_type, int map_flags, int tgt_free)
printf("Pass\n");
}
+/* Size of the LRU map is 2
+ * Add key=1 (+1 key)
+ * Add key=2 (+1 key)
+ * Lookup Key=1 (datapath)
+ * Lookup Key=2 (syscall)
+ * Add Key=3
+ * => Key=2 will be removed by LRU
+ * Iterate map. Only found key=1 and key=3
+ */
+static void test_lru_sanity7(int map_type, int map_flags)
+{
+ unsigned long long key, value[nr_cpus];
+ int lru_map_fd, expected_map_fd;
+ int next_cpu = 0;
+
+ printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
+ map_flags);
+
+ assert(sched_next_online(0, &next_cpu) != -1);
+
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
+ else
+ lru_map_fd = create_map(map_type, map_flags, 2);
+ assert(lru_map_fd != -1);
+
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* insert key=1 element */
+
+ key = 1;
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+
+ /* BPF_NOEXIST means: add new element if it doesn't exist */
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
+ /* key=1 already exists */
+ && errno == EEXIST);
+
+ /* insert key=2 element */
+
+ /* check that key=2 is not found */
+ key = 2;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ /* BPF_EXIST means: update existing element */
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
+ /* key=2 is not there */
+ errno == ENOENT);
+
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* insert key=3 element */
+
+ /* check that key=3 is not found */
+ key = 3;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ /* check that key=1 can be found and mark the ref bit to
+ * stop LRU from removing key=1
+ */
+ key = 1;
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+ assert(value[0] == 1234);
+
+ /* check that key=2 can be found and do _not_ mark ref bit.
+ * this will be evicted on next update.
+ */
+ key = 2;
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(value[0] == 1234);
+
+ key = 3;
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+
+ /* key=2 has been removed from the LRU */
+ key = 2;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
+/* Size of the LRU map is 2
+ * Add key=1 (+1 key)
+ * Add key=2 (+1 key)
+ * Lookup Key=1 (syscall)
+ * Lookup Key=2 (datapath)
+ * Add Key=3
+ * => Key=1 will be removed by LRU
+ * Iterate map. Only found key=2 and key=3
+ */
+static void test_lru_sanity8(int map_type, int map_flags)
+{
+ unsigned long long key, value[nr_cpus];
+ int lru_map_fd, expected_map_fd;
+ int next_cpu = 0;
+
+ printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
+ map_flags);
+
+ assert(sched_next_online(0, &next_cpu) != -1);
+
+ if (map_flags & BPF_F_NO_COMMON_LRU)
+ lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
+ else
+ lru_map_fd = create_map(map_type, map_flags, 2);
+ assert(lru_map_fd != -1);
+
+ expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
+ assert(expected_map_fd != -1);
+
+ value[0] = 1234;
+
+ /* insert key=1 element */
+
+ key = 1;
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+
+ /* BPF_NOEXIST means: add new element if it doesn't exist */
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
+ /* key=1 already exists */
+ && errno == EEXIST);
+
+ /* insert key=2 element */
+
+ /* check that key=2 is not found */
+ key = 2;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ /* BPF_EXIST means: update existing element */
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
+ /* key=2 is not there */
+ errno == ENOENT);
+
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+
+ /* insert key=3 element */
+
+ /* check that key=3 is not found */
+ key = 3;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ /* check that key=1 can be found and do _not_ mark ref bit.
+ * this will be evicted on next update.
+ */
+ key = 1;
+ assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+ assert(value[0] == 1234);
+
+ /* check that key=2 can be found and mark the ref bit to
+ * stop LRU from removing key=2
+ */
+ key = 2;
+ assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
+ assert(value[0] == 1234);
+
+ key = 3;
+ assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+ assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+ BPF_NOEXIST));
+
+ /* key=1 has been removed from the LRU */
+ key = 1;
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
+ errno == ENOENT);
+
+ assert(map_equal(lru_map_fd, expected_map_fd));
+
+ close(expected_map_fd);
+ close(lru_map_fd);
+
+ printf("Pass\n");
+}
+
int main(int argc, char **argv)
{
int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
@@ -637,6 +892,8 @@ int main(int argc, char **argv)
test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
test_lru_sanity5(map_types[t], map_flags[f]);
test_lru_sanity6(map_types[t], map_flags[f], tgt_free);
+ test_lru_sanity7(map_types[t], map_flags[f]);
+ test_lru_sanity8(map_types[t], map_flags[f]);
printf("\n");
}
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
new file mode 100755
index 000000000000..acf7a74f97cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -0,0 +1,464 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Setup/topology:
+#
+# NS1 NS2 NS3
+# veth1 <---> veth2 veth3 <---> veth4 (the top route)
+# veth5 <---> veth6 veth7 <---> veth8 (the bottom route)
+#
+# each vethN gets IPv[4|6]_N address
+#
+# IPv*_SRC = IPv*_1
+# IPv*_DST = IPv*_4
+#
+# all tests test pings from IPv*_SRC to IPv*_DST
+#
+# by default, routes are configured to allow packets to go
+# IP*_1 <=> IP*_2 <=> IP*_3 <=> IP*_4 (the top route)
+#
+# a GRE device is installed in NS3 with IPv*_GRE, and
+# NS1/NS2 are configured to route packets to IPv*_GRE via IP*_8
+# (the bottom route)
+#
+# Tests:
+#
+# 1. routes NS2->IPv*_DST are brought down, so the only way a ping
+# from IP*_SRC to IP*_DST can work is via IPv*_GRE
+#
+# 2a. in an egress test, a bpf LWT_XMIT program is installed on veth1
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth1:egress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+#
+# 2b. in an ingress test, a bpf LWT_IN program is installed on veth2
+# that encaps the packets with an IP/GRE header to route to IPv*_GRE
+#
+# ping: SRC->[encap at veth2:ingress]->GRE:decap->DST
+# ping replies go DST->SRC directly
+
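# Note (illustrative): when VRF is non-empty it is appended verbatim to the
# route commands in setup(), so a VRF-scoped pass would use something like
# VRF="vrf red", matching the "red" VRF device that setup() creates; with
# VRF empty, all routes land in the default table.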
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+readonly NS1="ns1-$(mktemp -u XXXXXX)"
+readonly NS2="ns2-$(mktemp -u XXXXXX)"
+readonly NS3="ns3-$(mktemp -u XXXXXX)"
+
+readonly IPv4_1="172.16.1.100"
+readonly IPv4_2="172.16.2.100"
+readonly IPv4_3="172.16.3.100"
+readonly IPv4_4="172.16.4.100"
+readonly IPv4_5="172.16.5.100"
+readonly IPv4_6="172.16.6.100"
+readonly IPv4_7="172.16.7.100"
+readonly IPv4_8="172.16.8.100"
+readonly IPv4_GRE="172.16.16.100"
+
+readonly IPv4_SRC=$IPv4_1
+readonly IPv4_DST=$IPv4_4
+
+readonly IPv6_1="fb01::1"
+readonly IPv6_2="fb02::1"
+readonly IPv6_3="fb03::1"
+readonly IPv6_4="fb04::1"
+readonly IPv6_5="fb05::1"
+readonly IPv6_6="fb06::1"
+readonly IPv6_7="fb07::1"
+readonly IPv6_8="fb08::1"
+readonly IPv6_GRE="fb10::1"
+
+readonly IPv6_SRC=$IPv6_1
+readonly IPv6_DST=$IPv6_4
+
+TEST_STATUS=0
+TESTS_SUCCEEDED=0
+TESTS_FAILED=0
+
+TMPFILE=""
+
+process_test_results()
+{
+ if [[ "${TEST_STATUS}" -eq 0 ]] ; then
+ echo "PASS"
+ TESTS_SUCCEEDED=$((TESTS_SUCCEEDED+1))
+ else
+ echo "FAIL"
+ TESTS_FAILED=$((TESTS_FAILED+1))
+ fi
+}
+
+print_test_summary_and_exit()
+{
+ echo "passed tests: ${TESTS_SUCCEEDED}"
+ echo "failed tests: ${TESTS_FAILED}"
+ if [ "${TESTS_FAILED}" -eq "0" ] ; then
+ exit 0
+ else
+ exit 1
+ fi
+}
+
+setup()
+{
+ set -e # exit on error
+ TEST_STATUS=0
+
+ # create devices and namespaces
+ ip netns add "${NS1}"
+ ip netns add "${NS2}"
+ ip netns add "${NS3}"
+
+ ip link add veth1 type veth peer name veth2
+ ip link add veth3 type veth peer name veth4
+ ip link add veth5 type veth peer name veth6
+ ip link add veth7 type veth peer name veth8
+
+ ip netns exec ${NS2} sysctl -wq net.ipv4.ip_forward=1
+ ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip link set veth1 netns ${NS1}
+ ip link set veth2 netns ${NS2}
+ ip link set veth3 netns ${NS2}
+ ip link set veth4 netns ${NS3}
+ ip link set veth5 netns ${NS1}
+ ip link set veth6 netns ${NS2}
+ ip link set veth7 netns ${NS2}
+ ip link set veth8 netns ${NS3}
+
+ if [ ! -z "${VRF}" ] ; then
+ ip -netns ${NS1} link add red type vrf table 1001
+ ip -netns ${NS1} link set red up
+ ip -netns ${NS1} route add table 1001 unreachable default metric 8192
+ ip -netns ${NS1} -6 route add table 1001 unreachable default metric 8192
+ ip -netns ${NS1} link set veth1 vrf red
+ ip -netns ${NS1} link set veth5 vrf red
+
+ ip -netns ${NS2} link add red type vrf table 1001
+ ip -netns ${NS2} link set red up
+ ip -netns ${NS2} route add table 1001 unreachable default metric 8192
+ ip -netns ${NS2} -6 route add table 1001 unreachable default metric 8192
+ ip -netns ${NS2} link set veth2 vrf red
+ ip -netns ${NS2} link set veth3 vrf red
+ ip -netns ${NS2} link set veth6 vrf red
+ ip -netns ${NS2} link set veth7 vrf red
+ fi
+
+ # configure addresses: the top route (1-2-3-4)
+ ip -netns ${NS1} addr add ${IPv4_1}/24 dev veth1
+ ip -netns ${NS2} addr add ${IPv4_2}/24 dev veth2
+ ip -netns ${NS2} addr add ${IPv4_3}/24 dev veth3
+ ip -netns ${NS3} addr add ${IPv4_4}/24 dev veth4
+ ip -netns ${NS1} -6 addr add ${IPv6_1}/128 nodad dev veth1
+ ip -netns ${NS2} -6 addr add ${IPv6_2}/128 nodad dev veth2
+ ip -netns ${NS2} -6 addr add ${IPv6_3}/128 nodad dev veth3
+ ip -netns ${NS3} -6 addr add ${IPv6_4}/128 nodad dev veth4
+
+ # configure addresses: the bottom route (5-6-7-8)
+ ip -netns ${NS1} addr add ${IPv4_5}/24 dev veth5
+ ip -netns ${NS2} addr add ${IPv4_6}/24 dev veth6
+ ip -netns ${NS2} addr add ${IPv4_7}/24 dev veth7
+ ip -netns ${NS3} addr add ${IPv4_8}/24 dev veth8
+ ip -netns ${NS1} -6 addr add ${IPv6_5}/128 nodad dev veth5
+ ip -netns ${NS2} -6 addr add ${IPv6_6}/128 nodad dev veth6
+ ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
+ ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
+
+ ip -netns ${NS1} link set dev veth1 up
+ ip -netns ${NS2} link set dev veth2 up
+ ip -netns ${NS2} link set dev veth3 up
+ ip -netns ${NS3} link set dev veth4 up
+ ip -netns ${NS1} link set dev veth5 up
+ ip -netns ${NS2} link set dev veth6 up
+ ip -netns ${NS2} link set dev veth7 up
+ ip -netns ${NS3} link set dev veth8 up
+
+ # configure routes: IP*_SRC -> veth1/IP*_2 (= top route) default;
+ # the bottom route to specific bottom addresses
+
+ # NS1
+ # top route
+ ip -netns ${NS1} route add ${IPv4_2}/32 dev veth1 ${VRF}
+ ip -netns ${NS1} route add default dev veth1 via ${IPv4_2} ${VRF} # go top by default
+ ip -netns ${NS1} -6 route add ${IPv6_2}/128 dev veth1 ${VRF}
+ ip -netns ${NS1} -6 route add default dev veth1 via ${IPv6_2} ${VRF} # go top by default
+ # bottom route
+ ip -netns ${NS1} route add ${IPv4_6}/32 dev veth5 ${VRF}
+ ip -netns ${NS1} route add ${IPv4_7}/32 dev veth5 via ${IPv4_6} ${VRF}
+ ip -netns ${NS1} route add ${IPv4_8}/32 dev veth5 via ${IPv4_6} ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_6}/128 dev veth5 ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_7}/128 dev veth5 via ${IPv6_6} ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_8}/128 dev veth5 via ${IPv6_6} ${VRF}
+
+ # NS2
+ # top route
+ ip -netns ${NS2} route add ${IPv4_1}/32 dev veth2 ${VRF}
+ ip -netns ${NS2} route add ${IPv4_4}/32 dev veth3 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_1}/128 dev veth2 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_4}/128 dev veth3 ${VRF}
+ # bottom route
+ ip -netns ${NS2} route add ${IPv4_5}/32 dev veth6 ${VRF}
+ ip -netns ${NS2} route add ${IPv4_8}/32 dev veth7 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_5}/128 dev veth6 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_8}/128 dev veth7 ${VRF}
+
+ # NS3
+ # top route
+ ip -netns ${NS3} route add ${IPv4_3}/32 dev veth4
+ ip -netns ${NS3} route add ${IPv4_1}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} route add ${IPv4_2}/32 dev veth4 via ${IPv4_3}
+ ip -netns ${NS3} -6 route add ${IPv6_3}/128 dev veth4
+ ip -netns ${NS3} -6 route add ${IPv6_1}/128 dev veth4 via ${IPv6_3}
+ ip -netns ${NS3} -6 route add ${IPv6_2}/128 dev veth4 via ${IPv6_3}
+ # bottom route
+ ip -netns ${NS3} route add ${IPv4_7}/32 dev veth8
+ ip -netns ${NS3} route add ${IPv4_5}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} route add ${IPv4_6}/32 dev veth8 via ${IPv4_7}
+ ip -netns ${NS3} -6 route add ${IPv6_7}/128 dev veth8
+ ip -netns ${NS3} -6 route add ${IPv6_5}/128 dev veth8 via ${IPv6_7}
+ ip -netns ${NS3} -6 route add ${IPv6_6}/128 dev veth8 via ${IPv6_7}
+
+ # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
+ ip -netns ${NS3} link set gre_dev up
+ ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
+ ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ${VRF}
+ ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ${VRF}
+
+
+ # configure IPv6 GRE device in NS3, and a route to it via the "bottom" route
+ ip -netns ${NS3} -6 tunnel add name gre6_dev mode ip6gre remote ${IPv6_1} local ${IPv6_GRE} ttl 255
+ ip -netns ${NS3} link set gre6_dev up
+ ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev
+ ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
+
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+
+ TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
+
+ sleep 1 # reduce flakiness
+ set +e
+}
+
+cleanup()
+{
+ if [ -f ${TMPFILE} ] ; then
+ rm ${TMPFILE}
+ fi
+
+ ip netns del ${NS1} 2> /dev/null
+ ip netns del ${NS2} 2> /dev/null
+ ip netns del ${NS3} 2> /dev/null
+}
+
+trap cleanup EXIT
+
+remove_routes_to_gredev()
+{
+ ip -netns ${NS1} route del ${IPv4_GRE} dev veth5 ${VRF}
+ ip -netns ${NS2} route del ${IPv4_GRE} dev veth7 ${VRF}
+ ip -netns ${NS1} -6 route del ${IPv6_GRE}/128 dev veth5 ${VRF}
+ ip -netns ${NS2} -6 route del ${IPv6_GRE}/128 dev veth7 ${VRF}
+}
+
+add_unreachable_routes_to_gredev()
+{
+ ip -netns ${NS1} route add unreachable ${IPv4_GRE}/32 ${VRF}
+ ip -netns ${NS2} route add unreachable ${IPv4_GRE}/32 ${VRF}
+ ip -netns ${NS1} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
+ ip -netns ${NS2} -6 route add unreachable ${IPv6_GRE}/128 ${VRF}
+}
+
+test_ping()
+{
+ local readonly PROTO=$1
+ local readonly EXPECTED=$2
+ local RET=0
+
+ if [ "${PROTO}" == "IPv4" ] ; then
+ ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} > /dev/null 2>&1
+ RET=$?
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} > /dev/null 2>&1
+ RET=$?
+ else
+ echo " test_ping: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+
+ if [ "0" != "${RET}" ]; then
+ RET=1
+ fi
+
+ if [ "${EXPECTED}" != "${RET}" ] ; then
+ echo " test_ping failed: expected: ${EXPECTED}; got ${RET}"
+ TEST_STATUS=1
+ fi
+}
+
+test_gso()
+{
+ local readonly PROTO=$1
+ local readonly PKT_SZ=5000
+ local IP_DST=""
+ : > ${TMPFILE} # trim the capture file
+
+ # check that nc is present
+ command -v nc >/dev/null 2>&1 || \
+ { echo >&2 "nc is not available: skipping GSO tests"; return; }
+
+ # listen on IPv*_DST, capture TCP into $TMPFILE
+ if [ "${PROTO}" == "IPv4" ] ; then
+ IP_DST=${IPv4_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+ elif [ "${PROTO}" == "IPv6" ] ; then
+ IP_DST=${IPv6_DST}
+ ip netns exec ${NS3} bash -c \
+ "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+ RET=$?
+ else
+ echo " test_gso: unknown PROTO: ${PROTO}"
+ TEST_STATUS=1
+ fi
+ sleep 1 # let nc start listening
+
+ # send a packet larger than MTU
+ ip netns exec ${NS1} bash -c \
+ "dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
+ sleep 2 # let the packet get delivered
+
+ # verify we received all expected bytes
+ SZ=$(stat -c %s ${TMPFILE})
+ if [ "$SZ" != "$PKT_SZ" ] ; then
+ echo " test_gso failed: ${PROTO}"
+ TEST_STATUS=1
+ fi
+}
+
+test_egress()
+{
+ local readonly ENCAP=$1
+ echo "starting egress ${ENCAP} encap test ${VRF}"
+ setup
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, ping fails
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF}
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+ test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+ test_lwt_ip_encap.o sec encap_gre dev veth1 ${VRF}
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS1} route add ${IPv4_DST} encap bpf xmit obj \
+ test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
+ ip -netns ${NS1} -6 route add ${IPv6_DST} encap bpf xmit obj \
+ test_lwt_ip_encap.o sec encap_gre6 dev veth1 ${VRF}
+ else
+ echo "FAIL: unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # skip GSO tests with VRF: VRF routing needs properly assigned
+ # source IP/device, which is easy to do with ping and hard with dd/nc.
+ if [ -z "${VRF}" ] ; then
+ test_gso IPv4
+ test_gso IPv6
+ fi
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+test_ingress()
+{
+ local readonly ENCAP=$1
+ echo "starting ingress ${ENCAP} encap test ${VRF}"
+ setup
+
+ # need to wait a bit for IPv6 to autoconf, otherwise
+ # ping6 sometimes fails with "unable to bind to address"
+
+ # by default, pings work
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # remove NS2->DST routes, pings fail
+ ip -netns ${NS2} route del ${IPv4_DST}/32 dev veth3 ${VRF}
+ ip -netns ${NS2} -6 route del ${IPv6_DST}/128 dev veth3 ${VRF}
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # install replacement routes (LWT/eBPF), pings succeed
+ if [ "${ENCAP}" == "IPv4" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+ test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+ test_lwt_ip_encap.o sec encap_gre dev veth2 ${VRF}
+ elif [ "${ENCAP}" == "IPv6" ] ; then
+ ip -netns ${NS2} route add ${IPv4_DST} encap bpf in obj \
+ test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj \
+ test_lwt_ip_encap.o sec encap_gre6 dev veth2 ${VRF}
+ else
+ echo "FAIL: unknown encap ${ENCAP}"
+ TEST_STATUS=1
+ fi
+ test_ping IPv4 0
+ test_ping IPv6 0
+
+ # a negative test: remove routes to GRE devices: ping fails
+ remove_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ # another negative test
+ add_unreachable_routes_to_gredev
+ test_ping IPv4 1
+ test_ping IPv6 1
+
+ cleanup
+ process_test_results
+}
+
+VRF=""
+test_egress IPv4
+test_egress IPv6
+test_ingress IPv4
+test_ingress IPv6
+
+VRF="vrf red"
+test_egress IPv4
+test_egress IPv6
+test_ingress IPv4
+test_ingress IPv6
+
+print_test_summary_and_exit
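
The script above calls process_test_results and print_test_summary_and_exit, which are defined in an earlier part of the file that is not shown in this hunk. A minimal sketch of the bookkeeping they are expected to do, assuming TEST_STATUS plus PASSED/FAILED counters (the actual helpers may differ):

    process_test_results()
    {
            # one test_egress/test_ingress invocation == one result
            if [ "${TEST_STATUS}" -eq 0 ] ; then
                    echo "PASS"
                    PASSED=$((${PASSED} + 1))
            else
                    echo "FAIL"
                    FAILED=$((${FAILED} + 1))
            fi
            TEST_STATUS=0   # reset for the next test
    }

    print_test_summary_and_exit()
    {
            echo "passed tests: ${PASSED}"
            echo "failed tests: ${FAILED}"
            [ "${FAILED}" -eq 0 ] && exit 0 || exit 1
    }
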
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e2b9eee37187..a3fbc571280a 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Testsuite for eBPF maps
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2016 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <stdio.h>
@@ -27,23 +24,17 @@
#include "bpf_util.h"
#include "bpf_rlimit.h"
+#include "test_maps.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
-static int map_flags;
+static int skips;
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
- printf(format); \
- exit(-1); \
- } \
-})
+static int map_flags;
-static void test_hashmap(int task, void *data)
+static void test_hashmap(unsigned int task, void *data)
{
long long key, next_key, first_key, value;
int fd;
@@ -133,7 +124,7 @@ static void test_hashmap(int task, void *data)
close(fd);
}
-static void test_hashmap_sizes(int task, void *data)
+static void test_hashmap_sizes(unsigned int task, void *data)
{
int fd, i, j;
@@ -153,7 +144,7 @@ static void test_hashmap_sizes(int task, void *data)
}
}
-static void test_hashmap_percpu(int task, void *data)
+static void test_hashmap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, value);
@@ -280,7 +271,7 @@ static int helper_fill_hashmap(int max_entries)
return fd;
}
-static void test_hashmap_walk(int task, void *data)
+static void test_hashmap_walk(unsigned int task, void *data)
{
int fd, i, max_entries = 1000;
long long key, value, next_key;
@@ -351,7 +342,7 @@ static void test_hashmap_zero_seed(void)
close(second);
}
-static void test_arraymap(int task, void *data)
+static void test_arraymap(unsigned int task, void *data)
{
int key, next_key, fd;
long long value;
@@ -406,7 +397,7 @@ static void test_arraymap(int task, void *data)
close(fd);
}
-static void test_arraymap_percpu(int task, void *data)
+static void test_arraymap_percpu(unsigned int task, void *data)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
BPF_DECLARE_PERCPU(long, values);
@@ -502,7 +493,7 @@ static void test_arraymap_percpu_many_keys(void)
close(fd);
}
-static void test_devmap(int task, void *data)
+static void test_devmap(unsigned int task, void *data)
{
int fd;
__u32 key, value;
@@ -517,7 +508,7 @@ static void test_devmap(int task, void *data)
close(fd);
}
-static void test_queuemap(int task, void *data)
+static void test_queuemap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -575,7 +566,7 @@ static void test_queuemap(int task, void *data)
close(fd);
}
-static void test_stackmap(int task, void *data)
+static void test_stackmap(unsigned int task, void *data)
{
const int MAP_SIZE = 32;
__u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -633,7 +624,6 @@ static void test_stackmap(int task, void *data)
close(fd);
}
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <sys/select.h>
@@ -641,7 +631,7 @@ static void test_stackmap(int task, void *data)
#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
-static void test_sockmap(int tasks, void *data)
+static void test_sockmap(unsigned int tasks, void *data)
{
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
@@ -725,6 +715,15 @@ static void test_sockmap(int tasks, void *data)
sizeof(key), sizeof(value),
6, 0);
if (fd < 0) {
+ if (!bpf_probe_map_type(BPF_MAP_TYPE_SOCKMAP, 0)) {
+ printf("%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\n",
+ __func__);
+ skips++;
+ for (i = 0; i < 6; i++)
+ close(sfd[i]);
+ return;
+ }
+
printf("Failed to create sockmap %i\n", fd);
goto out_sockmap;
}
@@ -1258,10 +1257,11 @@ static void test_map_large(void)
}
#define run_parallel(N, FN, DATA) \
- printf("Fork %d tasks to '" #FN "'\n", N); \
+ printf("Fork %u tasks to '" #FN "'\n", N); \
__run_parallel(N, FN, DATA)
-static void __run_parallel(int tasks, void (*fn)(int task, void *data),
+static void __run_parallel(unsigned int tasks,
+ void (*fn)(unsigned int task, void *data),
void *data)
{
pid_t pid[tasks];
@@ -1302,7 +1302,7 @@ static void test_map_stress(void)
#define DO_UPDATE 1
#define DO_DELETE 0
-static void test_update_delete(int fn, void *data)
+static void test_update_delete(unsigned int fn, void *data)
{
int do_update = ((int *)data)[1];
int fd = ((int *)data)[0];
@@ -1692,6 +1692,10 @@ static void run_all_tests(void)
test_map_in_map();
}
+#define DECLARE
+#include <map_tests/tests.h>
+#undef DECLARE
+
int main(void)
{
srand(time(NULL));
@@ -1702,6 +1706,10 @@ int main(void)
map_flags = BPF_F_NO_PREALLOC;
run_all_tests();
- printf("test_maps: OK\n");
+#define CALL
+#include <map_tests/tests.h>
+#undef CALL
+
+ printf("test_maps: OK, %d SKIPPED\n", skips);
return 0;
}
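
The new SKIP path added to test_sockmap() above relies on bpf_probe_map_type() from libbpf to tell an unsupported map type apart from a genuine failure, bumping the skips counter instead of aborting. A short C sketch of the same pattern for an arbitrary map type (the helper name is illustrative; only the two libbpf calls are real):

    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* Returns the new map fd, or -1 when the running kernel does not
     * support the map type, in which case the caller should record a
     * skip rather than a failure. Illustrative only, not part of the patch.
     */
    static int create_map_or_skip(enum bpf_map_type type, int *skips)
    {
    	int fd = bpf_create_map(type, sizeof(int), sizeof(int), 1, 0);

    	if (fd < 0) {
    		/* the probe says the kernel lacks this map type: skip */
    		if (!bpf_probe_map_type(type, 0)) {
    			printf("SKIP (unsupported map type %d)\n", type);
    			(*skips)++;
    			return -1;
    		}
    		printf("Failed to create map: %d\n", fd);
    		exit(1);
    	}
    	return fd;
    }
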
diff --git a/tools/testing/selftests/bpf/test_maps.h b/tools/testing/selftests/bpf/test_maps.h
new file mode 100644
index 000000000000..77d8587ac4ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_maps.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TEST_MAPS_H
+#define _TEST_MAPS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ exit(-1); \
+ } \
+})
+
+#endif
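
With the CHECK() macro moved into test_maps.h, test_maps.c now includes a generated map_tests/tests.h twice, once with DECLARE and once with CALL defined, so that a standalone test dropped into map_tests/ is both declared and invoked without editing test_maps.c. A sketch of what the generated header and one such test could look like (the generation is handled by the Makefile and is not shown in this patch; the test name is illustrative):

    /* map_tests/tests.h -- expected to be generated at build time */
    #ifdef DECLARE
    extern void test_sample_map(void);
    #endif
    #ifdef CALL
    test_sample_map();
    #endif

    /* map_tests/sample_map.c -- a standalone test using CHECK() */
    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <test_maps.h>

    void test_sample_map(void)
    {
    	int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
    				sizeof(int), 1, 0);

    	CHECK(fd < 0, "bpf_create_map", "error: %d\n", fd);
    	close(fd);
    	printf("%s:PASS\n", __func__);
    }
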
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index d59642e70f56..425f9ed27c3b 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -1,6 +1,7 @@
#!/usr/bin/python3
# Copyright (C) 2017 Netronome Systems, Inc.
+# Copyright (c) 2019 Mellanox Technologies. All rights reserved
#
# This software is licensed under the GNU General License Version 2,
# June 1991 as shown in the file COPYING in the top-level directory of this
@@ -15,14 +16,17 @@
from datetime import datetime
import argparse
+import errno
import json
import os
import pprint
import random
+import re
import string
import struct
import subprocess
import time
+import traceback
logfile = None
log_level = 1
@@ -78,7 +82,9 @@ def fail(cond, msg):
if not cond:
return
print("FAIL: " + msg)
- log("FAIL: " + msg, "", level=1)
+ tb = "".join(traceback.extract_stack().format())
+ print(tb)
+ log("FAIL: " + msg, tb, level=1)
os.sys.exit(1)
def start_test(msg):
@@ -303,6 +309,8 @@ class DebugfsDir:
_, out = cmd('ls ' + path)
for f in out.split():
+ if f == "ports":
+ continue
p = os.path.join(path, f)
if os.path.isfile(p):
_, out = cmd('cat %s/%s' % (path, f))
@@ -318,42 +326,112 @@ class DebugfsDir:
return dfs
-class NetdevSim:
+class NetdevSimDev:
"""
- Class for netdevsim netdevice and its attributes.
+ Class for netdevsim bus device and its attributes.
"""
- def __init__(self, link=None):
- self.link = link
+ def __init__(self, port_count=1):
+ addr = 0
+ while True:
+ try:
+ with open("/sys/bus/netdevsim/new_device", "w") as f:
+ f.write("%u %u" % (addr, port_count))
+ except OSError as e:
+ if e.errno == errno.ENOSPC:
+ addr += 1
+ continue
+ raise e
+ break
+ self.addr = addr
+
+ # As the probe of the netdevsim device might happen from a workqueue,
+ # wait here until all netdevs appear.
+ self.wait_for_netdevs(port_count)
+
+ ret, out = cmd("udevadm settle", fail=False)
+ if ret:
+ raise Exception("udevadm settle failed")
+ ifnames = self.get_ifnames()
- self.dev = self._netdevsim_create()
devs.append(self)
+ self.dfs_dir = "/sys/kernel/debug/netdevsim/netdevsim%u/" % addr
+
+ self.nsims = []
+ for port_index in range(port_count):
+ self.nsims.append(NetdevSim(self, port_index, ifnames[port_index]))
+
+ def get_ifnames(self):
+ ifnames = []
+ listdir = os.listdir("/sys/bus/netdevsim/devices/netdevsim%u/net/" % self.addr)
+ for ifname in listdir:
+ ifnames.append(ifname)
+ ifnames.sort()
+ return ifnames
+
+ def wait_for_netdevs(self, port_count):
+ timeout = 5
+ timeout_start = time.time()
+
+ while True:
+ try:
+ ifnames = self.get_ifnames()
+ except FileNotFoundError as e:
+ ifnames = []
+ if len(ifnames) == port_count:
+ break
+ if time.time() < timeout_start + timeout:
+ continue
+ raise Exception("netdevices did not appear within timeout")
- self.ns = ""
+ def dfs_num_bound_progs(self):
+ path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+ _, progs = cmd('ls %s' % (path))
+ return len(progs.split())
- self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
- self.sdev_dir = self.dfs_dir + '/sdev/'
- self.dfs_refresh()
+ def dfs_get_bound_progs(self, expected):
+ progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+ if expected is not None:
+ if len(progs) != expected:
+ fail(True, "%d BPF programs bound, expected %d" %
+ (len(progs), expected))
+ return progs
- def __getitem__(self, key):
- return self.dev[key]
+ def remove(self):
+ with open("/sys/bus/netdevsim/del_device", "w") as f:
+ f.write("%u" % self.addr)
+ devs.remove(self)
- def _netdevsim_create(self):
- link = "" if self.link is None else "link " + self.link.dev['ifname']
- _, old = ip("link show")
- ip("link add sim%d {link} type netdevsim".format(link=link))
- _, new = ip("link show")
+ def remove_nsim(self, nsim):
+ self.nsims.remove(nsim)
+ with open("/sys/bus/netdevsim/devices/netdevsim%u/del_port" % self.addr ,"w") as f:
+ f.write("%u" % nsim.port_index)
- for dev in new:
- f = filter(lambda x: x["ifname"] == dev["ifname"], old)
- if len(list(f)) == 0:
- return dev
+class NetdevSim:
+ """
+ Class for netdevsim netdevice and its attributes.
+ """
- raise Exception("failed to create netdevsim device")
+ def __init__(self, nsimdev, port_index, ifname):
+ # In case udev renamed the netdev according to the new naming scheme,
+ # check that the name matches the port_index.
+ nsimnamere = re.compile(r"eni\d+np(\d+)")
+ match = nsimnamere.match(ifname)
+ if match and int(match.groups()[0]) != port_index + 1:
+ raise Exception("netdevice name mismatches the expected one")
+
+ self.nsimdev = nsimdev
+ self.port_index = port_index
+ self.ns = ""
+ self.dfs_dir = "%s/ports/%u/" % (nsimdev.dfs_dir, port_index)
+ self.dfs_refresh()
+ _, [self.dev] = ip("link show dev %s" % ifname)
+
+ def __getitem__(self, key):
+ return self.dev[key]
def remove(self):
- devs.remove(self)
- ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
+ self.nsimdev.remove_nsim(self)
def dfs_refresh(self):
self.dfs = DebugfsDir(self.dfs_dir)
@@ -364,22 +442,9 @@ class NetdevSim:
_, data = cmd('cat %s' % (path))
return data.strip()
- def dfs_num_bound_progs(self):
- path = os.path.join(self.sdev_dir, "bpf_bound_progs")
- _, progs = cmd('ls %s' % (path))
- return len(progs.split())
-
- def dfs_get_bound_progs(self, expected):
- progs = DebugfsDir(os.path.join(self.sdev_dir, "bpf_bound_progs"))
- if expected is not None:
- if len(progs) != expected:
- fail(True, "%d BPF programs bound, expected %d" %
- (len(progs), expected))
- return progs
-
def wait_for_flush(self, bound=0, total=0, n_retry=20):
for i in range(n_retry):
- nbound = self.dfs_num_bound_progs()
+ nbound = self.nsimdev.dfs_num_bound_progs()
nprogs = len(bpftool_prog_list())
if nbound == bound and nprogs == total:
return
@@ -589,6 +654,15 @@ def check_verifier_log(output, reference):
return
fail(True, "Missing or incorrect message from netdevsim in verifier log")
+def check_multi_basic(two_xdps):
+ fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
+ fail("prog" in two_xdps, "Base program reported in multi program mode")
+ fail(len(two_xdps["attached"]) != 2,
+ "Wrong attached program count with two programs")
+ fail(two_xdps["attached"][0]["prog"]["id"] ==
+ two_xdps["attached"][1]["prog"]["id"],
+ "Offloaded and other programs have the same id")
+
def test_spurios_extack(sim, obj, skip_hw, needle):
res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
include_stderr=True)
@@ -600,6 +674,68 @@ def test_spurios_extack(sim, obj, skip_hw, needle):
include_stderr=True)
check_no_extack(res, needle)
+def test_multi_prog(simdev, sim, obj, modename, modeid):
+ start_test("Test multi-attachment XDP - %s + offload..." %
+ (modename or "default", ))
+ sim.set_xdp(obj, "offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+ fail("prog" not in xdp, "Base program not reported in single program mode")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with one program")
+
+ sim.set_xdp(obj, modename)
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ check_multi_basic(two_xdps)
+
+ offloaded2 = sim.dfs_read("bpf_offloaded_id")
+ fail(offloaded != offloaded2,
+ "Offload ID changed after loading other program")
+
+ start_test("Test multi-attachment XDP - replace...")
+ ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
+ fail(ret == 0, "Replaced one of programs without -force")
+ check_extack(err, "XDP program already attached.", args)
+
+ if modename == "" or modename == "drv":
+ othermode = "" if modename == "drv" else "drv"
+ start_test("Test multi-attachment XDP - detach...")
+ ret, _, err = sim.unset_xdp(othermode, force=True,
+ fail=False, include_stderr=True)
+ fail(ret == 0, "Removed program with a bad mode")
+ check_extack(err, "program loaded with different flags.", args)
+
+ sim.unset_xdp("offload")
+ xdp = sim.ip_link_show(xdp=True)["xdp"]
+ offloaded = sim.dfs_read("bpf_offloaded_id")
+
+ fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs")
+ fail("prog" not in xdp,
+ "Base program not reported after multi program mode")
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Offload program not reported after other activated")
+ fail(len(xdp["attached"]) != 1,
+ "Wrong attached program count with remaining programs")
+ fail(offloaded != "0", "Offload ID reported with only other program left")
+
+ start_test("Test multi-attachment XDP - reattach...")
+ sim.set_xdp(obj, "offload")
+ two_xdps = sim.ip_link_show(xdp=True)["xdp"]
+
+ fail(xdp["attached"][0] not in two_xdps["attached"],
+ "Other program not reported after offload activated")
+ check_multi_basic(two_xdps)
+
+ start_test("Test multi-attachment XDP - device remove...")
+ simdev.remove()
+
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
+ sim.set_ethtool_tc_offloads(True)
+ return [simdev, sim]
# Parse command line
parser = argparse.ArgumentParser()
@@ -656,12 +792,14 @@ try:
bytecode = bpf_bytecode("1,6 0 0 4294967295,")
start_test("Test destruction of generic XDP...")
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.set_xdp(obj, "generic")
- sim.remove()
+ simdev.remove()
bpftool_prog_list_wait(expected=0)
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.tc_add_ingress()
start_test("Test TC non-offloaded...")
@@ -671,7 +809,7 @@ try:
start_test("Test TC non-offloaded isn't getting bound...")
ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
fail(ret != 0, "Software TC filter did not load")
- sim.dfs_get_bound_progs(expected=0)
+ simdev.dfs_get_bound_progs(expected=0)
sim.tc_flush_filters()
@@ -688,7 +826,7 @@ try:
start_test("Test TC offload by default...")
ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
fail(ret != 0, "Software TC filter did not load")
- sim.dfs_get_bound_progs(expected=0)
+ simdev.dfs_get_bound_progs(expected=0)
ingress = sim.tc_show_ingress(expected=1)
fltr = ingress[0]
fail(not fltr["in_hw"], "Filter not offloaded by default")
@@ -698,7 +836,7 @@ try:
start_test("Test TC cBPF bytcode tries offload by default...")
ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
fail(ret != 0, "Software TC filter did not load")
- sim.dfs_get_bound_progs(expected=0)
+ simdev.dfs_get_bound_progs(expected=0)
ingress = sim.tc_show_ingress(expected=1)
fltr = ingress[0]
fail(not fltr["in_hw"], "Bytecode not offloaded by default")
@@ -766,7 +904,7 @@ try:
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test TC offload basics...")
- dfs = sim.dfs_get_bound_progs(expected=1)
+ dfs = simdev.dfs_get_bound_progs(expected=1)
progs = bpftool_prog_list(expected=1)
ingress = sim.tc_show_ingress(expected=1)
@@ -801,18 +939,20 @@ try:
start_test("Test destroying device gets rid of TC filters...")
sim.cls_bpf_add_filter(obj, skip_sw=True)
- sim.remove()
+ simdev.remove()
bpftool_prog_list_wait(expected=0)
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.set_ethtool_tc_offloads(True)
start_test("Test destroying device gets rid of XDP...")
sim.set_xdp(obj, "offload")
- sim.remove()
+ simdev.remove()
bpftool_prog_list_wait(expected=0)
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.set_ethtool_tc_offloads(True)
start_test("Test XDP prog reporting...")
@@ -842,7 +982,9 @@ try:
ret, _, err = sim.set_xdp(obj, "generic", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
- fail(err.count("File exists") != 1, "Replaced driver XDP with generic")
+ check_extack(err,
+ "native and generic XDP can't be active at the same time.",
+ args)
ret, _, err = sim.set_xdp(obj, "", force=True,
fail=False, include_stderr=True)
fail(ret == 0, "Replaced XDP program with a program in different mode")
@@ -896,7 +1038,7 @@ try:
check_verifier_log(err, "[netdevsim] Hello from netdevsim!")
start_test("Test XDP offload is device bound...")
- dfs = sim.dfs_get_bound_progs(expected=1)
+ dfs = simdev.dfs_get_bound_progs(expected=1)
dprog = dfs[0]
fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
@@ -915,7 +1057,8 @@ try:
bpftool_prog_list_wait(expected=0)
start_test("Test attempt to use a program for a wrong device...")
- sim2 = NetdevSim()
+ simdev2 = NetdevSimDev()
+ sim2, = simdev2.nsims
sim2.set_xdp(obj, "offload")
pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
@@ -923,7 +1066,7 @@ try:
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a different device accepted")
check_extack_nsim(err, "program bound to different dev.", args)
- sim2.remove()
+ simdev2.remove()
ret, _, err = sim.set_xdp(pinned, "offload",
fail=False, include_stderr=True)
fail(ret == 0, "Pinned program loaded for a removed device accepted")
@@ -931,59 +1074,9 @@ try:
rm(pin_file)
bpftool_prog_list_wait(expected=0)
- start_test("Test multi-attachment XDP - attach...")
- sim.set_xdp(obj, "offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
- fail("prog" not in xdp, "Base program not reported in single program mode")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with one program")
-
- sim.set_xdp(obj, "")
- two_xdps = sim.ip_link_show(xdp=True)["xdp"]
- offloaded2 = sim.dfs_read("bpf_offloaded_id")
-
- fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
- fail("prog" in two_xdps, "Base program reported in multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(two_xdps["attached"]) != 2,
- "Wrong attached program count with two programs")
- fail(two_xdps["attached"][0]["prog"]["id"] ==
- two_xdps["attached"][1]["prog"]["id"],
- "offloaded and drv programs have the same id")
- fail(offloaded != offloaded2,
- "offload ID changed after loading driver program")
-
- start_test("Test multi-attachment XDP - replace...")
- ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
- fail(err.count("busy") != 1, "Replaced one of programs without -force")
-
- start_test("Test multi-attachment XDP - detach...")
- ret, _, err = sim.unset_xdp("drv", force=True,
- fail=False, include_stderr=True)
- fail(ret == 0, "Removed program with a bad mode")
- check_extack(err, "program loaded with different flags.", args)
-
- sim.unset_xdp("offload")
- xdp = sim.ip_link_show(xdp=True)["xdp"]
- offloaded = sim.dfs_read("bpf_offloaded_id")
-
- fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
- fail("prog" not in xdp,
- "Base program not reported after multi program mode")
- fail(xdp["attached"][0] not in two_xdps["attached"],
- "Offload program not reported after driver activated")
- fail(len(ipl["xdp"]["attached"]) != 1,
- "Wrong attached program count with remaining programs")
- fail(offloaded != "0", "offload ID reported with only driver program left")
-
- start_test("Test multi-attachment XDP - device remove...")
- sim.set_xdp(obj, "offload")
- sim.remove()
-
- sim = NetdevSim()
- sim.set_ethtool_tc_offloads(True)
+ simdev, sim = test_multi_prog(simdev, sim, obj, "", 1)
+ simdev, sim = test_multi_prog(simdev, sim, obj, "drv", 1)
+ simdev, sim = test_multi_prog(simdev, sim, obj, "generic", 2)
start_test("Test mixing of TC and XDP...")
sim.tc_add_ingress()
@@ -1030,15 +1123,15 @@ try:
start_test("Test if netdev removal waits for translation...")
delay_msec = 500
- sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+ sim.dfs["dev/bpf_bind_verifier_delay"] = delay_msec
start = time.time()
cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
(sim['ifname'], obj)
tc_proc = cmd(cmd_line, background=True, fail=False)
# Wait for the verifier to start
- while sim.dfs_num_bound_progs() <= 2:
+ while simdev.dfs_num_bound_progs() <= 2:
pass
- sim.remove()
+ simdev.remove()
end = time.time()
ret, _ = cmd_result(tc_proc, fail=False)
time_diff = end - start
@@ -1053,7 +1146,8 @@ try:
clean_up()
bpftool_prog_list_wait(expected=0)
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
map_obj = bpf_obj("sample_map_ret0.o")
start_test("Test loading program with maps...")
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
@@ -1075,7 +1169,7 @@ try:
prog_file, _ = pin_prog("/sys/fs/bpf/tmp_prog")
map_file, _ = pin_map("/sys/fs/bpf/tmp_map", idx=1, expected=2)
- sim.remove()
+ simdev.remove()
start_test("Test bpftool bound info reporting (removed dev)...")
check_dev_info_removed(prog_file=prog_file, map_file=map_file)
@@ -1084,7 +1178,8 @@ try:
clean_up()
bpftool_prog_list_wait(expected=0)
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
start_test("Test map update (no flags)...")
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
@@ -1165,27 +1260,29 @@ try:
start_test("Test map remove...")
sim.unset_xdp("offload")
bpftool_map_list_wait(expected=0)
- sim.remove()
+ simdev.remove()
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.set_xdp(map_obj, "offload", JSON=False) # map fixup msg breaks JSON
- sim.remove()
+ simdev.remove()
bpftool_map_list_wait(expected=0)
start_test("Test map creation fail path...")
- sim = NetdevSim()
+ simdev = NetdevSimDev()
+ sim, = simdev.nsims
sim.dfs["bpf_map_accept"] = "N"
ret, _ = sim.set_xdp(map_obj, "offload", JSON=False, fail=False)
fail(ret == 0,
"netdevsim didn't refuse to create a map with offload disabled")
- sim.remove()
+ simdev.remove()
start_test("Test multi-dev ASIC program reuse...")
- simA = NetdevSim()
- simB1 = NetdevSim()
- simB2 = NetdevSim(link=simB1)
- simB3 = NetdevSim(link=simB1)
+ simdevA = NetdevSimDev()
+ simA, = simdevA.nsims
+ simdevB = NetdevSimDev(3)
+ simB1, simB2, simB3 = simdevB.nsims
sims = (simA, simB1, simB2, simB3)
simB = (simB1, simB2, simB3)
@@ -1197,13 +1294,13 @@ try:
progB = bpf_pinned("/sys/fs/bpf/nsimB")
simA.set_xdp(progA, "offload", JSON=False)
- for d in simB:
+ for d in simdevB.nsims:
d.set_xdp(progB, "offload", JSON=False)
start_test("Test multi-dev ASIC cross-dev replace...")
ret, _ = simA.set_xdp(progB, "offload", force=True, JSON=False, fail=False)
fail(ret == 0, "cross-ASIC program allowed")
- for d in simB:
+ for d in simdevB.nsims:
ret, _ = d.set_xdp(progA, "offload", force=True, JSON=False, fail=False)
fail(ret == 0, "cross-ASIC program allowed")
@@ -1215,7 +1312,7 @@ try:
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
check_extack_nsim(err, "program bound to different dev.", args)
- for d in simB:
+ for d in simdevB.nsims:
ret, _, err = d.set_xdp(progA, "offload", force=True, JSON=False,
fail=False, include_stderr=True)
fail(ret == 0, "cross-ASIC program allowed")
@@ -1252,7 +1349,7 @@ try:
start_test("Test multi-dev ASIC cross-dev destruction...")
bpftool_prog_list_wait(expected=2)
- simA.remove()
+ simdevA.remove()
bpftool_prog_list_wait(expected=1)
ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"]
@@ -1270,6 +1367,7 @@ try:
fail(ifnameB != simB3['ifname'], "program not bound to remaining device")
simB3.remove()
+ simdevB.remove()
bpftool_prog_list_wait(expected=0)
start_test("Test multi-dev ASIC cross-dev destruction - orphaned...")
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 25f0083a9b2e..dae0819b1141 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1,95 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <time.h>
-
-#include <linux/types.h>
-typedef __u16 __sum16;
-#include <arpa/inet.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/filter.h>
-#include <linux/perf_event.h>
-#include <linux/unistd.h>
-
-#include <sys/ioctl.h>
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <fcntl.h>
-
-#include <linux/bpf.h>
-#include <linux/err.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "test_iptunnel_common.h"
-#include "bpf_util.h"
-#include "bpf_endian.h"
+#include "test_progs.h"
#include "bpf_rlimit.h"
-#include "trace_helpers.h"
-
-static int error_cnt, pass_cnt;
-static bool jit_enabled;
-#define MAGIC_BYTES 123
+int error_cnt, pass_cnt;
+bool jit_enabled;
+bool verifier_stats = false;
-/* ipv4 test vector */
-static struct {
- struct ethhdr eth;
- struct iphdr iph;
- struct tcphdr tcp;
-} __packed pkt_v4 = {
+struct ipv4_packet pkt_v4 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
- .iph.protocol = 6,
+ .iph.protocol = IPPROTO_TCP,
.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-/* ipv6 test vector */
-static struct {
- struct ethhdr eth;
- struct ipv6hdr iph;
- struct tcphdr tcp;
-} __packed pkt_v6 = {
+struct ipv6_packet pkt_v6 = {
.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
- .iph.nexthdr = 6,
+ .iph.nexthdr = IPPROTO_TCP,
.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
+ .tcp.doff = 5,
};
-#define _CHECK(condition, tag, duration, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- error_cnt++; \
- printf("%s:FAIL:%s ", __func__, tag); \
- printf(format); \
- } else { \
- pass_cnt++; \
- printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
- } \
- __ret; \
-})
-
-#define CHECK(condition, tag, format...) \
- _CHECK(condition, tag, duration, format)
-#define CHECK_ATTR(condition, tag, format...) \
- _CHECK(condition, tag, tattr.duration, format)
-
-static int bpf_find_map(const char *test, struct bpf_object *obj,
- const char *name)
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
struct bpf_map *map;
@@ -102,349 +38,6 @@ static int bpf_find_map(const char *test, struct bpf_object *obj,
return bpf_map__fd(map);
}
-static void test_pkt_access(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv4",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "ipv6",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
- bpf_object__close(obj);
-}
-
-static void test_prog_run_xattr(void)
-{
- const char *file = "./test_pkt_access.o";
- struct bpf_object *obj;
- char buf[10];
- int err;
- struct bpf_prog_test_run_attr tattr = {
- .repeat = 1,
- .data_in = &pkt_v4,
- .data_size_in = sizeof(pkt_v4),
- .data_out = buf,
- .data_size_out = 5,
- };
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
- &tattr.prog_fd);
- if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
- return;
-
- memset(buf, 0, sizeof(buf));
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
- "incorrect output size, want %lu have %u\n",
- sizeof(pkt_v4), tattr.data_size_out);
-
- CHECK_ATTR(buf[5] != 0, "overflow",
- "BPF_PROG_TEST_RUN ignored size hint\n");
-
- tattr.data_out = NULL;
- tattr.data_size_out = 0;
- errno = 0;
-
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
- "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
- tattr.data_size_out = 1;
- err = bpf_prog_test_run_xattr(&tattr);
- CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
-
- bpf_object__close(obj);
-}
-
-static void test_xdp(void)
-{
- struct vip key4 = {.protocol = 6, .family = AF_INET};
- struct vip key6 = {.protocol = 6, .family = AF_INET6};
- struct iptnl_info value4 = {.family = AF_INET};
- struct iptnl_info value6 = {.family = AF_INET6};
- const char *file = "./test_xdp.o";
- struct bpf_object *obj;
- char buf[128];
- struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
- __u32 duration, retval, size;
- int err, prog_fd, map_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip2tnl");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key4, &value4, 0);
- bpf_map_update_elem(map_fd, &key6, &value6, 0);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_TX || size != 74 ||
- iph->protocol != IPPROTO_IPIP, "ipv4",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 114 ||
- iph6->nexthdr != IPPROTO_IPV6, "ipv6",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-out:
- bpf_object__close(obj);
-}
-
-static void test_xdp_adjust_tail(void)
-{
- const char *file = "./test_adjust_tail.o";
- struct bpf_object *obj;
- char buf[128];
- __u32 duration, retval, size;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
-
- CHECK(err || retval != XDP_DROP,
- "ipv4", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != XDP_TX || size != 54,
- "ipv6", "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
- bpf_object__close(obj);
-}
-
-
-
-#define MAGIC_VAL 0x1234
-#define NUM_ITER 100000
-#define VIP_NUM 5
-
-static void test_l4lb(const char *file)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_l4lb_all(void)
-{
- const char *file1 = "./test_l4lb.o";
- const char *file2 = "./test_l4lb_noinline.o";
-
- test_l4lb(file1);
- test_l4lb(file2);
-}
-
-static void test_xdp_noinline(void)
-{
- const char *file = "./test_xdp_noinline.o";
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct vip key = {.protocol = 6};
- struct vip_meta {
- __u32 flags;
- __u32 vip_num;
- } value = {.vip_num = VIP_NUM};
- __u32 stats_key = VIP_NUM;
- struct vip_stats {
- __u64 bytes;
- __u64 pkts;
- } stats[nr_cpus];
- struct real_definition {
- union {
- __be32 dst;
- __be32 dstv6[4];
- };
- __u8 flags;
- } real_def = {.dst = MAGIC_VAL};
- __u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
- __u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
- char buf[128];
- u32 *magic = (u32 *)buf;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
-
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 54 ||
- *magic != MAGIC_VAL, "ipv4",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 1 || size != 74 ||
- *magic != MAGIC_VAL, "ipv6",
- "err %d errno %d retval %d size %d magic %x\n",
- err, errno, retval, size, *magic);
-
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
- for (i = 0; i < nr_cpus; i++) {
- bytes += stats[i].bytes;
- pkts += stats[i].pkts;
- }
- if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
- error_cnt++;
- printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
- }
-out:
- bpf_object__close(obj);
-}
-
-static void test_tcp_estats(void)
-{
- const char *file = "./test_tcp_estats.o";
- int err, prog_fd;
- struct bpf_object *obj;
- __u32 duration = 0;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- CHECK(err, "", "err %d errno %d\n", err, errno);
- if (err) {
- error_cnt++;
- return;
- }
-
- bpf_object__close(obj);
-}
-
-static inline __u64 ptr_to_u64(const void *ptr)
-{
- return (__u64) (unsigned long) ptr;
-}
-
static bool is_jit_enabled(void)
{
const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
@@ -463,475 +56,7 @@ static bool is_jit_enabled(void)
return enabled;
}
-static void test_bpf_obj_id(void)
-{
- const __u64 array_magic_value = 0xfaceb00c;
- const __u32 array_key = 0;
- const int nr_iters = 2;
- const char *file = "./test_obj_id.o";
- const char *expected_prog_name = "test_obj_id";
- const char *expected_map_name = "test_map_id";
- const __u64 nsec_per_sec = 1000000000;
-
- struct bpf_object *objs[nr_iters];
- int prog_fds[nr_iters], map_fds[nr_iters];
- /* +1 to test for the info_len returned by kernel */
- struct bpf_prog_info prog_infos[nr_iters + 1];
- struct bpf_map_info map_infos[nr_iters + 1];
- /* Each prog only uses one map. +1 to test nr_map_ids
- * returned by kernel.
- */
- __u32 map_ids[nr_iters + 1];
- char jited_insns[128], xlated_insns[128], zeros[128];
- __u32 i, next_id, info_len, nr_id_found, duration = 0;
- struct timespec real_time_ts, boot_time_ts;
- int err = 0;
- __u64 array_value;
- uid_t my_uid = getuid();
- time_t now, load_time;
-
- err = bpf_prog_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
-
- err = bpf_map_get_fd_by_id(0);
- CHECK(err >= 0 || errno != ENOENT,
- "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
-
- for (i = 0; i < nr_iters; i++)
- objs[i] = NULL;
-
- /* Check bpf_obj_get_info_by_fd() */
- bzero(zeros, sizeof(zeros));
- for (i = 0; i < nr_iters; i++) {
- now = time(NULL);
- err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
- &objs[i], &prog_fds[i]);
- /* test_obj_id.o is a dumb prog. It should never fail
- * to load.
- */
- if (err)
- error_cnt++;
- assert(!err);
-
- /* Insert a magic value to the map */
- map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
- assert(map_fds[i] >= 0);
- err = bpf_map_update_elem(map_fds[i], &array_key,
- &array_magic_value, 0);
- assert(!err);
-
- /* Check getting map info */
- info_len = sizeof(struct bpf_map_info) * 2;
- bzero(&map_infos[i], info_len);
- err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
- &info_len);
- if (CHECK(err ||
- map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
- map_infos[i].key_size != sizeof(__u32) ||
- map_infos[i].value_size != sizeof(__u64) ||
- map_infos[i].max_entries != 1 ||
- map_infos[i].map_flags != 0 ||
- info_len != sizeof(struct bpf_map_info) ||
- strcmp((char *)map_infos[i].name, expected_map_name),
- "get-map-info(fd)",
- "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
- err, errno,
- map_infos[i].type, BPF_MAP_TYPE_ARRAY,
- info_len, sizeof(struct bpf_map_info),
- map_infos[i].key_size,
- map_infos[i].value_size,
- map_infos[i].max_entries,
- map_infos[i].map_flags,
- map_infos[i].name, expected_map_name))
- goto done;
-
- /* Check getting prog info */
- info_len = sizeof(struct bpf_prog_info) * 2;
- bzero(&prog_infos[i], info_len);
- bzero(jited_insns, sizeof(jited_insns));
- bzero(xlated_insns, sizeof(xlated_insns));
- prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
- prog_infos[i].jited_prog_len = sizeof(jited_insns);
- prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
- prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
- prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
- prog_infos[i].nr_map_ids = 2;
- err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
- assert(!err);
- err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
- assert(!err);
- err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
- &info_len);
- load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
- + (prog_infos[i].load_time / nsec_per_sec);
- if (CHECK(err ||
- prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
- info_len != sizeof(struct bpf_prog_info) ||
- (jit_enabled && !prog_infos[i].jited_prog_len) ||
- (jit_enabled &&
- !memcmp(jited_insns, zeros, sizeof(zeros))) ||
- !prog_infos[i].xlated_prog_len ||
- !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
- load_time < now - 60 || load_time > now + 60 ||
- prog_infos[i].created_by_uid != my_uid ||
- prog_infos[i].nr_map_ids != 1 ||
- *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
- strcmp((char *)prog_infos[i].name, expected_prog_name),
- "get-prog-info(fd)",
- "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
- err, errno, i,
- prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
- info_len, sizeof(struct bpf_prog_info),
- jit_enabled,
- prog_infos[i].jited_prog_len,
- prog_infos[i].xlated_prog_len,
- !!memcmp(jited_insns, zeros, sizeof(zeros)),
- !!memcmp(xlated_insns, zeros, sizeof(zeros)),
- load_time, now,
- prog_infos[i].created_by_uid, my_uid,
- prog_infos[i].nr_map_ids, 1,
- *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
- prog_infos[i].name, expected_prog_name))
- goto done;
- }
-
- /* Check bpf_prog_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_prog_get_next_id(next_id, &next_id)) {
- struct bpf_prog_info prog_info = {};
- __u32 saved_map_id;
- int prog_fd;
-
- info_len = sizeof(prog_info);
-
- prog_fd = bpf_prog_get_fd_by_id(next_id);
- if (prog_fd < 0 && errno == ENOENT)
- /* The bpf_prog is in the dead row */
- continue;
- if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
- "prog_fd %d next_id %d errno %d\n",
- prog_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (prog_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- /* Negative test:
- * prog_info.nr_map_ids = 1
- * prog_info.map_ids = NULL
- */
- prog_info.nr_map_ids = 1;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- if (CHECK(!err || errno != EFAULT,
- "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
- err, errno, EFAULT))
- break;
- bzero(&prog_info, sizeof(prog_info));
- info_len = sizeof(prog_info);
-
- saved_map_id = *(int *)((long)prog_infos[i].map_ids);
- prog_info.map_ids = prog_infos[i].map_ids;
- prog_info.nr_map_ids = 2;
- err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
- prog_infos[i].jited_prog_insns = 0;
- prog_infos[i].xlated_prog_insns = 0;
- CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
- memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)(long)prog_info.map_ids != saved_map_id,
- "get-prog-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
- err, errno, info_len, sizeof(struct bpf_prog_info),
- memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)(long)prog_info.map_ids, saved_map_id);
- close(prog_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total prog id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
- /* Check bpf_map_get_next_id() */
- nr_id_found = 0;
- next_id = 0;
- while (!bpf_map_get_next_id(next_id, &next_id)) {
- struct bpf_map_info map_info = {};
- int map_fd;
-
- info_len = sizeof(map_info);
-
- map_fd = bpf_map_get_fd_by_id(next_id);
- if (map_fd < 0 && errno == ENOENT)
- /* The bpf_map is in the dead row */
- continue;
- if (CHECK(map_fd < 0, "get-map-fd(next_id)",
- "map_fd %d next_id %u errno %d\n",
- map_fd, next_id, errno))
- break;
-
- for (i = 0; i < nr_iters; i++)
- if (map_infos[i].id == next_id)
- break;
-
- if (i == nr_iters)
- continue;
-
- nr_id_found++;
-
- err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
- assert(!err);
-
- err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
- CHECK(err || info_len != sizeof(struct bpf_map_info) ||
- memcmp(&map_info, &map_infos[i], info_len) ||
- array_value != array_magic_value,
- "check get-map-info(next_id->fd)",
- "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
- err, errno, info_len, sizeof(struct bpf_map_info),
- memcmp(&map_info, &map_infos[i], info_len),
- array_value, array_magic_value);
-
- close(map_fd);
- }
- CHECK(nr_id_found != nr_iters,
- "check total map id found by get_next_id",
- "nr_id_found %u(%u)\n",
- nr_id_found, nr_iters);
-
-done:
- for (i = 0; i < nr_iters; i++)
- bpf_object__close(objs[i]);
-}
-
-static void test_pkt_md_access(void)
-{
- const char *file = "./test_pkt_md_access.o";
- struct bpf_object *obj;
- __u32 duration, retval;
- int err, prog_fd;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
- NULL, NULL, &retval, &duration);
- CHECK(err || retval, "",
- "err %d errno %d retval %d duration %d\n",
- err, errno, retval, duration);
-
- bpf_object__close(obj);
-}
-
-static void test_obj_name(void)
-{
- struct {
- const char *name;
- int success;
- int expected_errno;
- } tests[] = {
- { "", 1, 0 },
- { "_123456789ABCDE", 1, 0 },
- { "_123456789ABCDEF", 0, EINVAL },
- { "_123456789ABCD\n", 0, EINVAL },
- };
- struct bpf_insn prog[] = {
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- __u32 duration = 0;
- int i;
-
- for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
- size_t name_len = strlen(tests[i].name) + 1;
- union bpf_attr attr;
- size_t ncopy;
- int fd;
-
- /* test different attr.prog_name during BPF_PROG_LOAD */
- ncopy = name_len < sizeof(attr.prog_name) ?
- name_len : sizeof(attr.prog_name);
- bzero(&attr, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
- attr.insn_cnt = 2;
- attr.insns = ptr_to_u64(prog);
- attr.license = ptr_to_u64("");
- memcpy(attr.prog_name, tests[i].name, ncopy);
-
- fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-prog-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
-
- /* test different attr.map_name during BPF_MAP_CREATE */
- ncopy = name_len < sizeof(attr.map_name) ?
- name_len : sizeof(attr.map_name);
- bzero(&attr, sizeof(attr));
- attr.map_type = BPF_MAP_TYPE_ARRAY;
- attr.key_size = 4;
- attr.value_size = 4;
- attr.max_entries = 1;
- attr.map_flags = 0;
- memcpy(attr.map_name, tests[i].name, ncopy);
- fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
- CHECK((tests[i].success && fd < 0) ||
- (!tests[i].success && fd != -1) ||
- (!tests[i].success && errno != tests[i].expected_errno),
- "check-bpf-map-name",
- "fd %d(%d) errno %d(%d)\n",
- fd, tests[i].success, errno, tests[i].expected_errno);
-
- if (fd != -1)
- close(fd);
- }
-}
-
-static void test_tp_attach_query(void)
-{
- const int num_progs = 3;
- int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
- __u32 duration = 0, info_len, saved_prog_ids[num_progs];
- const char *file = "./test_tracepoint.o";
- struct perf_event_query_bpf *query;
- struct perf_event_attr attr = {};
- struct bpf_object *obj[num_progs];
- struct bpf_prog_info prog_info;
- char buf[256];
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- return;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- return;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
-
- query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
- for (i = 0; i < num_progs; i++) {
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
- &prog_fd[i]);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto cleanup1;
-
- bzero(&prog_info, sizeof(prog_info));
- prog_info.jited_prog_len = 0;
- prog_info.xlated_prog_len = 0;
- prog_info.nr_map_ids = 0;
- info_len = sizeof(prog_info);
- err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
- if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
- err, errno))
- goto cleanup1;
- saved_prog_ids[i] = prog_info.id;
-
- pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd[i], errno))
- goto cleanup2;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 0) {
- /* check NULL prog array query */
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 0,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto cleanup3;
-
- if (i == 1) {
- /* try to get # of programs only */
- query->ids_len = 0;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
-
- /* try a few negative tests */
- /* invalid query pointer */
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
- (struct perf_event_query_bpf *)0x1);
- if (CHECK(!err || errno != EFAULT,
- "perf_event_ioc_query_bpf",
- "err %d errno %d\n", err, errno))
- goto cleanup3;
-
-			/* not enough space */
- query->ids_len = 1;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- }
-
- query->ids_len = num_progs;
- err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
- if (CHECK(err || query->prog_cnt != (i + 1),
- "perf_event_ioc_query_bpf",
- "err %d errno %d query->prog_cnt %u\n",
- err, errno, query->prog_cnt))
- goto cleanup3;
- for (j = 0; j < i + 1; j++)
- if (CHECK(saved_prog_ids[j] != query->ids[j],
- "perf_event_ioc_query_bpf",
- "#%d saved_prog_id %x query prog_id %x\n",
- j, saved_prog_ids[j], query->ids[j]))
- goto cleanup3;
- }
-
- i = num_progs - 1;
- for (; i >= 0; i--) {
- cleanup3:
- ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
- cleanup2:
- close(pmu_fd[i]);
- cleanup1:
- bpf_object__close(obj[i]);
- }
- free(query);
-}
-
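test_tp_attach_query() above exercises PERF_EVENT_IOC_QUERY_BPF, which reports the IDs of the BPF programs attached to a perf event: the caller allocates a struct perf_event_query_bpf with ids_len slots, and the kernel fills prog_cnt (and as many ids[] as fit), returning ENOSPC when the array is too small. A minimal stand-alone sketch, assuming pmu_fd is an already opened tracepoint perf event:

	struct perf_event_query_bpf *q;
	__u32 max_ids = 16;

	q = calloc(1, sizeof(*q) + max_ids * sizeof(__u32));
	q->ids_len = max_ids;
	if (ioctl(pmu_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
		printf("%u prog(s) attached, first id %u\n",
		       q->prog_cnt, q->prog_cnt ? q->ids[0] : 0);
	else if (errno == ENOSPC)
		printf("array too small, need %u slots\n", q->prog_cnt);
	free(q);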
-static int compare_map_keys(int map1_fd, int map2_fd)
+int compare_map_keys(int map1_fd, int map2_fd)
{
__u32 key, next_key;
char val_buf[PERF_MAX_STACK_DEPTH *
@@ -958,7 +83,7 @@ static int compare_map_keys(int map1_fd, int map2_fd)
return 0;
}
-static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
__u32 key, next_key, *cur_key_p, *next_key_p;
char *val_buf1, *val_buf2;
@@ -994,165 +119,7 @@ out:
return err;
}
-static void test_stacktrace_map()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_map.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (bytes <= 0 || bytes >= sizeof(buf))
- goto close_prog;
-
-	/* Open the perf event and attach bpf program */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- goto disable_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (err)
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (stack_amap_fd < 0)
- goto disable_pmu;
-
-	/* give the bpf program some time to run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vice versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu_noerr;
-
- goto disable_pmu_noerr;
-disable_pmu:
- error_cnt++;
-disable_pmu_noerr:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
-close_prog:
- bpf_object__close(obj);
-}
-
-static void test_stacktrace_map_raw_tp()
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd;
- const char *file = "./test_stacktrace_map.o";
- int efd, err, prog_fd;
- __u32 key, val, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (control_map_fd < 0)
- goto close_prog;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (stackid_hmap_fd < 0)
- goto close_prog;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (stackmap_fd < 0)
- goto close_prog;
-
-	/* give the bpf program some time to run */
- sleep(1);
-
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vice versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static int extract_build_id(char *build_id, size_t size)
+int extract_build_id(char *build_id, size_t size)
{
FILE *fp;
char *line = NULL;
@@ -1176,769 +143,35 @@ err:
return -1;
}
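extract_build_id() survives the split and is now exported via test_progs.h; its body is elided by the hunk above, but as the "get build_id with readelf" checks below indicate, it returns the hex GNU build ID of ./urandom_read. A stand-alone sketch of that kind of lookup (the command line and parsing here are assumptions, not the function's exact code):

	char id[128] = {};
	char line[256];
	FILE *fp;

	fp = popen("readelf -n ./urandom_read 2>/dev/null", "r");
	if (fp) {
		while (fgets(line, sizeof(line), fp))
			if (sscanf(line, " Build ID: %127s", id) == 1)
				break;
		pclose(fp);
	}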
-static void test_stacktrace_build_id(void)
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
- struct perf_event_attr attr = {};
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
- int retry = 1;
-
-retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- goto out;
-
- /* Get the ID for the sched/sched_switch tracepoint */
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/random/urandom_read/id");
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
- "read", "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
-	/* Open the perf event and attach bpf program */
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("./urandom_read") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vice versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- /* stack_map_get_build_id_offset() is racy and sometimes can return
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
- * try it one more time.
- */
- if (build_id_matches < 1 && retry--) {
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
- bpf_object__close(obj);
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
- __func__);
- goto retry;
- }
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- stack_trace_len = PERF_MAX_STACK_DEPTH
- * sizeof(struct bpf_stack_build_id);
- err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
- CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
- "err %d errno %d\n", err, errno);
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-
-out:
- return;
-}
-
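The inner loop of this build-id test (and of the NMI variant that follows) renders each 20-byte GNU build ID as a 40-character hex string before substring-matching it against the readelf output from extract_build_id(). In isolation, the formatting step looks like this:

	unsigned char raw_id[20] = {};	/* stands for one id_offs[i].build_id */
	char hex_id[41];
	int j;

	for (j = 0; j < 20; j++)
		sprintf(hex_id + 2 * j, "%02x", raw_id[j]);
	/* sprintf() NUL-terminates, so hex_id now holds the 40 hex digits */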
-static void test_stacktrace_build_id_nmi(void)
-{
- int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
- const char *file = "./test_stacktrace_build_id.o";
- int err, pmu_fd, prog_fd;
- struct perf_event_attr attr = {
- .sample_freq = 5000,
- .freq = 1,
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- };
- __u32 key, previous_key, val, duration = 0;
- struct bpf_object *obj;
- char buf[256];
- int i, j;
- struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
- int build_id_matches = 0;
- int retry = 1;
-
-retry:
- err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
- if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
- return;
-
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(pmu_fd < 0, "perf_event_open",
- "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
- pmu_fd, errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
- err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- /* find map fds */
- control_map_fd = bpf_find_map(__func__, obj, "control_map");
- if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
- if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
- if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
- err, errno))
- goto disable_pmu;
-
- stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
- if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
- == 0);
- assert(system("taskset 0x1 ./urandom_read 100000") == 0);
- /* disable stack trace collection */
- key = 0;
- val = 1;
- bpf_map_update_elem(control_map_fd, &key, &val, 0);
-
- /* for every element in stackid_hmap, we can find a corresponding one
-	 * in stackmap, and vice versa.
- */
- err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
- if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
- if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = extract_build_id(buf, 256);
-
- if (CHECK(err, "get build_id with readelf",
- "err %d errno %d\n", err, errno))
- goto disable_pmu;
-
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
- if (CHECK(err, "get_next_key from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
-
- do {
- char build_id[64];
-
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
- if (CHECK(err, "lookup_elem from stackmap",
- "err %d, errno %d\n", err, errno))
- goto disable_pmu;
- for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
- if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
- id_offs[i].offset != 0) {
- for (j = 0; j < 20; ++j)
- sprintf(build_id + 2 * j, "%02x",
- id_offs[i].build_id[j] & 0xff);
- if (strstr(buf, build_id) != NULL)
- build_id_matches = 1;
- }
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
-
- /* stack_map_get_build_id_offset() is racy and sometimes can return
- * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
- * try it one more time.
- */
- if (build_id_matches < 1 && retry--) {
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
- close(pmu_fd);
- bpf_object__close(obj);
- printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
- __func__);
- goto retry;
- }
-
- if (CHECK(build_id_matches < 1, "build id match",
- "Didn't find expected build ID from the map\n"))
- goto disable_pmu;
-
- /*
- * We intentionally skip compare_stack_ips(). This is because we
- * only support one in_nmi() ips-to-build_id translation per cpu
-	 * at any time, thus stack_amap here will always fall back to
- * BPF_STACK_BUILD_ID_IP;
- */
-
-disable_pmu:
- ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
-
-close_pmu:
- close(pmu_fd);
-
-close_prog:
- bpf_object__close(obj);
-}
-
-#define MAX_CNT_RAWTP 10ull
-#define MAX_STACK_RAWTP 100
-struct get_stack_trace_t {
- int pid;
- int kern_stack_size;
- int user_stack_size;
- int user_stack_buildid_size;
- __u64 kern_stack[MAX_STACK_RAWTP];
- __u64 user_stack[MAX_STACK_RAWTP];
- struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
-};
-
-static int get_stack_print_output(void *data, int size)
+void *spin_lock_thread(void *arg)
{
- bool good_kern_stack = false, good_user_stack = false;
- const char *nonjit_func = "___bpf_prog_run";
- struct get_stack_trace_t *e = data;
- int i, num_stack;
- static __u64 cnt;
- struct ksym *ks;
-
- cnt++;
-
- if (size < sizeof(struct get_stack_trace_t)) {
- __u64 *raw_data = data;
- bool found = false;
-
- num_stack = size / sizeof(__u64);
- /* If jit is enabled, we do not have a good way to
- * verify the sanity of the kernel stack. So we
- * just assume it is good if the stack is not empty.
- * This could be improved in the future.
- */
- if (jit_enabled) {
- found = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(raw_data[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- found = true;
- break;
- }
- }
- }
- if (found) {
- good_kern_stack = true;
- good_user_stack = true;
- }
- } else {
- num_stack = e->kern_stack_size / sizeof(__u64);
- if (jit_enabled) {
- good_kern_stack = num_stack > 0;
- } else {
- for (i = 0; i < num_stack; i++) {
- ks = ksym_search(e->kern_stack[i]);
- if (strcmp(ks->name, nonjit_func) == 0) {
- good_kern_stack = true;
- break;
- }
- }
- }
- if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
- good_user_stack = true;
- }
- if (!good_kern_stack || !good_user_stack)
- return LIBBPF_PERF_EVENT_ERROR;
-
- if (cnt == MAX_CNT_RAWTP)
- return LIBBPF_PERF_EVENT_DONE;
-
- return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_get_stack_raw_tp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
- struct perf_event_attr attr = {};
- struct timespec tv = {0, 10};
- __u32 key = 0, duration = 0;
- struct bpf_object *obj;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
- if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
- perfmap_fd, errno))
- goto close_prog;
-
- err = load_kallsyms();
- if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.type = PERF_TYPE_SOFTWARE;
- attr.config = PERF_COUNT_SW_BPF_OUTPUT;
- pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
- -1/*group_fd*/, 0);
- if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
- errno))
- goto close_prog;
-
- err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
- if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
- err, errno))
- goto close_prog;
-
- err = perf_event_mmap(pmu_fd);
- if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- /* trigger some syscall action */
- for (i = 0; i < MAX_CNT_RAWTP; i++)
- nanosleep(&tv, NULL);
-
- err = perf_event_poller(pmu_fd, get_stack_print_output);
- if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_rawtp(void)
-{
- const char *file = "./test_get_stack_rawtp.o";
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- int efd, err, prog_fd;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
- return;
-
- efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
- if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
- goto close_prog;
-
- /* query (getpid(), efd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_prog;
-
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- strcmp(buf, "sys_enter") == 0;
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_prog;
-
- /* test zero len */
- len = 0;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test empty buffer */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
- err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter");
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- /* test smaller buffer */
- len = 3;
- err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
- "err %d errno %d\n", err, errno))
- goto close_prog;
- err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
- len == strlen("sys_enter") &&
- strcmp(buf, "sy") == 0;
- if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
- goto close_prog;
-
- goto close_prog_noerr;
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp_core(const char *probe_name,
- const char *tp_name)
-{
- const char *file = "./test_tracepoint.o";
- int err, bytes, efd, prog_fd, pmu_fd;
- struct perf_event_attr attr = {};
- __u64 probe_offset, probe_addr;
- __u32 len, prog_id, fd_type;
- struct bpf_object *obj;
- __u32 duration = 0;
- char buf[256];
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
- if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
- goto close_prog;
-
- snprintf(buf, sizeof(buf),
- "/sys/kernel/debug/tracing/events/%s/id", probe_name);
- efd = open(buf, O_RDONLY, 0);
- if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
- goto close_prog;
- bytes = read(efd, buf, sizeof(buf));
- close(efd);
- if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
- "bytes %d errno %d\n", bytes, errno))
- goto close_prog;
-
- attr.config = strtol(buf, NULL, 0);
- attr.type = PERF_TYPE_TRACEPOINT;
- attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
- pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
- 0 /* cpu 0 */, -1 /* group id */,
- 0 /* flags */);
- if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
- if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
- if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- /* query (getpid(), pmu_fd) */
- len = sizeof(buf);
- err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
- &fd_type, &probe_offset, &probe_addr);
- if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
- errno))
- goto close_pmu;
-
- err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
- if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
- fd_type, buf))
- goto close_pmu;
-
- close(pmu_fd);
- goto close_prog_noerr;
-
-close_pmu:
- close(pmu_fd);
-close_prog:
- error_cnt++;
-close_prog_noerr:
- bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp(void)
-{
- test_task_fd_query_tp_core("sched/sched_switch",
- "sched_switch");
- test_task_fd_query_tp_core("syscalls/sys_enter_read",
- "sys_enter_read");
-}
-
-static void test_reference_tracking()
-{
- const char *file = "./test_sk_lookup_kern.o";
- struct bpf_object *obj;
- struct bpf_program *prog;
- __u32 duration = 0;
- int err = 0;
-
- obj = bpf_object__open(file);
- if (IS_ERR(obj)) {
- error_cnt++;
- return;
- }
-
- bpf_object__for_each_program(prog, obj) {
- const char *title;
-
- /* Ignore .text sections */
- title = bpf_program__title(prog, false);
- if (strstr(title, ".text") != NULL)
- continue;
-
- bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
+ __u32 duration, retval;
+ int err, prog_fd = *(u32 *) arg;
- /* Expect verifier failure if test name has 'fail' */
- if (strstr(title, "fail") != NULL) {
- libbpf_set_print(NULL, NULL, NULL);
- err = !bpf_program__load(prog, "GPL", 0);
- libbpf_set_print(printf, printf, NULL);
- } else {
- err = bpf_program__load(prog, "GPL", 0);
- }
- CHECK(err, title, "\n");
- }
- bpf_object__close(obj);
+ err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration);
+ pthread_exit(arg);
}
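spin_lock_thread() is the one new helper in this file: it is exported so that a split-out prog_tests/ file can run the same spin-lock program concurrently from several threads via bpf_prog_test_run(). A hypothetical caller (the thread count and variable names are illustrative, not taken from the patch):

	pthread_t t1, t2;
	void *unused;

	pthread_create(&t1, NULL, spin_lock_thread, &prog_fd);
	pthread_create(&t2, NULL, spin_lock_thread, &prog_fd);
	pthread_join(t1, &unused);
	pthread_join(t2, &unused);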
-enum {
- QUEUE,
- STACK,
-};
+#define DECLARE
+#include <prog_tests/tests.h>
+#undef DECLARE
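The DECLARE/CALL pair is the mechanism behind the test_progs split: prog_tests/tests.h is generated from the files in prog_tests/ and expands either to prototypes or to calls, depending on which macro is defined when it is included. Assuming prog_tests/ contains pkt_access.c and xdp.c, the generated header would look roughly like this (a sketch; the real file comes from a Makefile rule):

	/* prog_tests/tests.h (generated, do not edit) */
#ifdef DECLARE
	extern void test_pkt_access(void);
	extern void test_xdp(void);
#endif
#ifdef CALL
	test_pkt_access();
	test_xdp();
#endif

main() below includes the same header a second time with CALL defined, which invokes every test in turn.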
-static void test_queue_stack_map(int type)
-{
- const int MAP_SIZE = 32;
- __u32 vals[MAP_SIZE], duration, retval, size, val;
- int i, err, prog_fd, map_in_fd, map_out_fd;
- char file[32], buf[128];
- struct bpf_object *obj;
- struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
-
- /* Fill test values to be used */
- for (i = 0; i < MAP_SIZE; i++)
- vals[i] = rand();
-
- if (type == QUEUE)
- strncpy(file, "./test_queue_map.o", sizeof(file));
- else if (type == STACK)
- strncpy(file, "./test_stack_map.o", sizeof(file));
- else
- return;
-
- err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
- if (err) {
- error_cnt++;
- return;
- }
-
- map_in_fd = bpf_find_map(__func__, obj, "map_in");
- if (map_in_fd < 0)
- goto out;
-
- map_out_fd = bpf_find_map(__func__, obj, "map_out");
- if (map_out_fd < 0)
- goto out;
-
- /* Push 32 elements to the input map */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
- if (err) {
- error_cnt++;
- goto out;
- }
- }
-
-	/* The eBPF program pushes iph.saddr into the output map,
-	 * pops the input map and saves that value in iph.daddr.
- */
- for (i = 0; i < MAP_SIZE; i++) {
- if (type == QUEUE) {
- val = vals[i];
- pkt_v4.iph.saddr = vals[i] * 5;
- } else if (type == STACK) {
- val = vals[MAP_SIZE - 1 - i];
- pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
- }
-
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- if (err || retval || size != sizeof(pkt_v4) ||
- iph->daddr != val)
- break;
- }
-
- CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
- "bpf_map_pop_elem",
- "err %d errno %d retval %d size %d iph->daddr %u\n",
- err, errno, retval, size, iph->daddr);
-
- /* Queue is empty, program should return TC_ACT_SHOT */
- err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
- buf, &size, &retval, &duration);
- CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
- "check-queue-stack-map-empty",
- "err %d errno %d retval %d size %d\n",
- err, errno, retval, size);
-
- /* Check that the program pushed elements correctly */
- for (i = 0; i < MAP_SIZE; i++) {
- err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
- if (err || val != vals[i] * 5)
- break;
- }
-
- CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
- "bpf_map_push_elem", "err %d value %u\n", err, val);
-
-out:
- pkt_v4.iph.saddr = 0;
- bpf_object__close(obj);
-}
-
-int main(void)
+int main(int ac, char **av)
{
srand(time(NULL));
jit_enabled = is_jit_enabled();
- test_pkt_access();
- test_prog_run_xattr();
- test_xdp();
- test_xdp_adjust_tail();
- test_l4lb_all();
- test_xdp_noinline();
- test_tcp_estats();
- test_bpf_obj_id();
- test_pkt_md_access();
- test_obj_name();
- test_tp_attach_query();
- test_stacktrace_map();
- test_stacktrace_build_id();
- test_stacktrace_build_id_nmi();
- test_stacktrace_map_raw_tp();
- test_get_stack_raw_tp();
- test_task_fd_query_rawtp();
- test_task_fd_query_tp();
- test_reference_tracking();
- test_queue_stack_map(QUEUE);
- test_queue_stack_map(STACK);
+ if (ac == 2 && strcmp(av[1], "-s") == 0)
+ verifier_stats = true;
+
+#define CALL
+#include <prog_tests/tests.h>
+#undef CALL
printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
new file mode 100644
index 000000000000..f095e1d4c657
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <time.h>
+#include <signal.h>
+
+#include <linux/types.h>
+typedef __u16 __sum16;
+#include <arpa/inet.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/filter.h>
+#include <linux/perf_event.h>
+#include <linux/unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "test_iptunnel_common.h"
+#include "bpf_util.h"
+#include "bpf_endian.h"
+#include "trace_helpers.h"
+#include "flow_dissector_load.h"
+
+extern int error_cnt, pass_cnt;
+extern bool jit_enabled;
+extern bool verifier_stats;
+
+#define MAGIC_BYTES 123
+
+/* ipv4 test vector */
+struct ipv4_packet {
+ struct ethhdr eth;
+ struct iphdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv4_packet pkt_v4;
+
+/* ipv6 test vector */
+struct ipv6_packet {
+ struct ethhdr eth;
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+} __packed;
+extern struct ipv6_packet pkt_v6;
+
+#define _CHECK(condition, tag, duration, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ error_cnt++; \
+ printf("%s:FAIL:%s ", __func__, tag); \
+ printf(format); \
+ } else { \
+ pass_cnt++; \
+ printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
+ } \
+ __ret; \
+})
+
+#define CHECK(condition, tag, format...) \
+ _CHECK(condition, tag, duration, format)
+#define CHECK_ATTR(condition, tag, format...) \
+ _CHECK(condition, tag, tattr.duration, format)
+
+#define MAGIC_VAL 0x1234
+#define NUM_ITER 100000
+#define VIP_NUM 5
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
+int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
+int compare_map_keys(int map1_fd, int map2_fd);
+int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
+int extract_build_id(char *build_id, size_t size);
+void *spin_lock_thread(void *arg);
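With pkt_v4/pkt_v6, the CHECK() macros and the helper prototypes exported from this header, each split-out test only needs to include it and provide a void test_<name>(void) entry point. A minimal, hypothetical prog_tests/foo.c (the file and object names are illustrative only):

	/* prog_tests/foo.c */
	#include <test_progs.h>

	void test_foo(void)
	{
		const char *file = "./test_foo.o";	/* assumed object */
		struct bpf_object *obj;
		__u32 duration = 0, retval;
		int err, prog_fd;

		err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			return;

		err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
					NULL, NULL, &retval, &duration);
		CHECK(err || retval, "test_run", "err %d retval %d\n", err, retval);
		bpf_object__close(obj);
	}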
diff --git a/tools/testing/selftests/bpf/test_section_names.c b/tools/testing/selftests/bpf/test_section_names.c
index 7c4f41572b1c..dee2f2eceb0f 100644
--- a/tools/testing/selftests/bpf/test_section_names.c
+++ b/tools/testing/selftests/bpf/test_section_names.c
@@ -119,6 +119,21 @@ static struct sec_name_test tests[] = {
{0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG},
{0, BPF_CGROUP_UDP6_SENDMSG},
},
+ {
+ "cgroup/recvmsg4",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG},
+ {0, BPF_CGROUP_UDP4_RECVMSG},
+ },
+ {
+ "cgroup/recvmsg6",
+ {0, BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG},
+ {0, BPF_CGROUP_UDP6_RECVMSG},
+ },
+ {
+ "cgroup/sysctl",
+ {0, BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL},
+ {0, BPF_CGROUP_SYSCTL},
+ },
};
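The new table entries pin down how libbpf resolves the fresh section names: "cgroup/recvmsg4", "cgroup/recvmsg6" and "cgroup/sysctl" must map to the expected program and attach types. The lookups under test boil down to the following (illustrative snippet, error handling omitted):

	enum bpf_attach_type expected_attach, attach;
	enum bpf_prog_type prog_type;

	libbpf_prog_type_by_name("cgroup/recvmsg4", &prog_type, &expected_attach);
	libbpf_attach_type_by_name("cgroup/recvmsg4", &attach);
	/* prog_type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	 * expected_attach == attach == BPF_CGROUP_UDP4_RECVMSG
	 */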
static int test_prog_type_by_name(const struct sec_name_test *test)
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index 561ffb6d6433..fb679ac3d4b0 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -20,6 +20,7 @@
#define MAX_INSNS 512
char bpf_log_buf[BPF_LOG_BUF_SIZE];
+static bool verbose = false;
struct sock_test {
const char *descr;
@@ -325,6 +326,7 @@ static int load_sock_prog(const struct bpf_insn *prog,
enum bpf_attach_type attach_type)
{
struct bpf_load_program_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct bpf_load_program_attr));
attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
@@ -332,8 +334,13 @@ static int load_sock_prog(const struct bpf_insn *prog,
attr.insns = prog;
attr.insns_cnt = probe_prog_length(attr.insns);
attr.license = "GPL";
+ attr.log_level = 2;
- return bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (verbose && ret < 0)
+ fprintf(stderr, "%s\n", bpf_log_buf);
+
+ return ret;
}
static int attach_sock_prog(int cgfd, int progfd,
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 3f110eaaf29c..4ecde2392327 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -76,6 +76,7 @@ struct sock_addr_test {
enum {
LOAD_REJECT,
ATTACH_REJECT,
+ ATTACH_OKAY,
SYSCALL_EPERM,
SYSCALL_ENOTSUPP,
SUCCESS,
@@ -88,9 +89,13 @@ static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
+static int recvmsg_allow_prog_load(const struct sock_addr_test *test);
+static int recvmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
+static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
+static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
@@ -507,6 +512,92 @@ static struct sock_addr_test tests[] = {
SRC6_REWRITE_IP,
SYSCALL_EPERM,
},
+
+ /* recvmsg */
+ {
+ "recvmsg4: return code ok",
+ recvmsg_allow_prog_load,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP4_RECVMSG,
+ AF_INET,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_OKAY,
+ },
+ {
+ "recvmsg4: return code !ok",
+ recvmsg_deny_prog_load,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP4_RECVMSG,
+ AF_INET,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ "recvmsg6: return code ok",
+ recvmsg_allow_prog_load,
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ ATTACH_OKAY,
+ },
+ {
+ "recvmsg6: return code !ok",
+ recvmsg_deny_prog_load,
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ NULL,
+ 0,
+ NULL,
+ 0,
+ NULL,
+ LOAD_REJECT,
+ },
+ {
+ "recvmsg4: rewrite IP & port (asm)",
+ recvmsg4_rw_asm_prog_load,
+ BPF_CGROUP_UDP4_RECVMSG,
+ BPF_CGROUP_UDP4_RECVMSG,
+ AF_INET,
+ SOCK_DGRAM,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_REWRITE_IP,
+ SERV4_REWRITE_PORT,
+ SERV4_IP,
+ SUCCESS,
+ },
+ {
+ "recvmsg6: rewrite IP & port (asm)",
+ recvmsg6_rw_asm_prog_load,
+ BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_UDP6_RECVMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SERV6_IP,
+ SUCCESS,
+ },
};
static int mk_sockaddr(int domain, const char *ip, unsigned short port,
@@ -765,8 +856,8 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
-static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
- int32_t rc)
+static int xmsg_ret_only_prog_load(const struct sock_addr_test *test,
+ int32_t rc)
{
struct bpf_insn insns[] = {
/* return rc */
@@ -778,12 +869,22 @@ static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
{
- return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+ return xmsg_ret_only_prog_load(test, /*rc*/ 1);
}
static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
{
- return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+ return xmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
+static int recvmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+ return xmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int recvmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+ return xmsg_ret_only_prog_load(test, /*rc*/ 0);
}
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
@@ -838,6 +939,47 @@ static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
+static int recvmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
+{
+ struct sockaddr_in src4_rw_addr;
+
+ if (mk_sockaddr(AF_INET, SERV4_IP, SERV4_PORT,
+ (struct sockaddr *)&src4_rw_addr,
+ sizeof(src4_rw_addr)) == -1)
+ return -1;
+
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (sk.family == AF_INET && */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 6),
+
+ /* sk.type == SOCK_DGRAM) { */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, type)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 4),
+
+ /* user_ip4 = src4_rw_addr.sin_addr */
+ BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_addr.s_addr),
+ BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+ offsetof(struct bpf_sock_addr, user_ip4)),
+
+ /* user_port = src4_rw_addr.sin_port */
+ BPF_MOV32_IMM(BPF_REG_7, src4_rw_addr.sin_port),
+ BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+ offsetof(struct bpf_sock_addr, user_port)),
+ /* } */
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+
+ return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+}
+
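In restricted-C terms, the instruction sequence above is roughly the following cgroup/recvmsg4 program, which rewrites the source address reported back to userspace by recvmsg() (a readability sketch only; rewrite_ip4 and rewrite_port stand for the network-order immediates taken from src4_rw_addr, and the test intentionally builds the program from raw instructions instead):

	SEC("cgroup/recvmsg4")
	int recvmsg4_rw(struct bpf_sock_addr *ctx)
	{
		if (ctx->family == AF_INET && ctx->type == SOCK_DGRAM) {
			ctx->user_ip4 = rewrite_ip4;	/* SERV4_IP */
			ctx->user_port = rewrite_port;	/* SERV4_PORT */
		}
		return 1;	/* let the recvmsg() proceed */
	}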
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG4_PROG_PATH);
@@ -901,6 +1043,39 @@ static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_REWRITE_IP);
}
+static int recvmsg6_rw_asm_prog_load(const struct sock_addr_test *test)
+{
+ struct sockaddr_in6 src6_rw_addr;
+
+ if (mk_sockaddr(AF_INET6, SERV6_IP, SERV6_PORT,
+ (struct sockaddr *)&src6_rw_addr,
+ sizeof(src6_rw_addr)) == -1)
+ return -1;
+
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+
+ /* if (sk.family == AF_INET6) { */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, family)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 10),
+
+ STORE_IPV6(user_ip6, src6_rw_addr.sin6_addr.s6_addr32),
+
+		/* user_port = src6_rw_addr.sin6_port */
+ BPF_MOV32_IMM(BPF_REG_7, src6_rw_addr.sin6_port),
+ BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7,
+ offsetof(struct bpf_sock_addr, user_port)),
+ /* } */
+
+ /* return 1 */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+
+ return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
+}
+
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
{
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
@@ -1282,13 +1457,13 @@ out:
return err;
}
-static int run_sendmsg_test_case(const struct sock_addr_test *test)
+static int run_xmsg_test_case(const struct sock_addr_test *test, int max_cmsg)
{
socklen_t addr_len = sizeof(struct sockaddr_storage);
- struct sockaddr_storage expected_src_addr;
- struct sockaddr_storage requested_addr;
struct sockaddr_storage expected_addr;
- struct sockaddr_storage real_src_addr;
+ struct sockaddr_storage server_addr;
+ struct sockaddr_storage sendmsg_addr;
+ struct sockaddr_storage recvmsg_addr;
int clientfd = -1;
int servfd = -1;
int set_cmsg;
@@ -1297,20 +1472,19 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
if (test->type != SOCK_DGRAM)
goto err;
- if (init_addrs(test, &requested_addr, &expected_addr,
- &expected_src_addr))
+ if (init_addrs(test, &sendmsg_addr, &server_addr, &expected_addr))
goto err;
/* Prepare server to sendmsg to */
- servfd = start_server(test->type, &expected_addr, addr_len);
+ servfd = start_server(test->type, &server_addr, addr_len);
if (servfd == -1)
goto err;
- for (set_cmsg = 0; set_cmsg <= 1; ++set_cmsg) {
+ for (set_cmsg = 0; set_cmsg <= max_cmsg; ++set_cmsg) {
if (clientfd >= 0)
close(clientfd);
- clientfd = sendmsg_to_server(test->type, &requested_addr,
+ clientfd = sendmsg_to_server(test->type, &sendmsg_addr,
addr_len, set_cmsg, /*flags*/0,
&err);
if (err)
@@ -1330,10 +1504,10 @@ static int run_sendmsg_test_case(const struct sock_addr_test *test)
* specific packet may differ from the one used by default and
* returned by getsockname(2).
*/
- if (recvmsg_from_client(servfd, &real_src_addr) == -1)
+ if (recvmsg_from_client(servfd, &recvmsg_addr) == -1)
goto err;
- if (cmp_addr(&real_src_addr, &expected_src_addr, /*cmp_port*/0))
+ if (cmp_addr(&recvmsg_addr, &expected_addr, /*cmp_port*/0))
goto err;
}
@@ -1366,6 +1540,9 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
goto out;
} else if (test->expected_result == ATTACH_REJECT || err) {
goto err;
+ } else if (test->expected_result == ATTACH_OKAY) {
+ err = 0;
+ goto out;
}
switch (test->attach_type) {
@@ -1379,7 +1556,11 @@ static int run_test_case(int cgfd, const struct sock_addr_test *test)
break;
case BPF_CGROUP_UDP4_SENDMSG:
case BPF_CGROUP_UDP6_SENDMSG:
- err = run_sendmsg_test_case(test);
+ err = run_xmsg_test_case(test, 1);
+ break;
+ case BPF_CGROUP_UDP4_RECVMSG:
+ case BPF_CGROUP_UDP6_RECVMSG:
+ err = run_xmsg_test_case(test, 0);
break;
default:
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
new file mode 100644
index 000000000000..e089477fa0a3
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <sys/socket.h>
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
+
+enum bpf_addr_array_idx {
+ ADDR_SRV_IDX,
+ ADDR_CLI_IDX,
+ __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+ EGRESS_SRV_IDX,
+ EGRESS_CLI_IDX,
+ INGRESS_LISTEN_IDX,
+ __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+ EGRESS_LINUM_IDX,
+ INGRESS_LINUM_IDX,
+ __NR_BPF_LINUM_ARRAY_IDX,
+};
+
+struct bpf_spinlock_cnt {
+ struct bpf_spin_lock lock;
+ __u32 cnt;
+};
+
+#define CHECK(condition, tag, format...) ({ \
+ int __ret = !!(condition); \
+ if (__ret) { \
+ printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
+ printf(format); \
+ printf("\n"); \
+ exit(-1); \
+ } \
+})
+
+#define TEST_CGROUP "/test-bpf-sock-fields"
+#define DATA "Hello BPF!"
+#define DATA_LEN sizeof(DATA)
+
+static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int sk_pkt_out_cnt10_fd;
+static int sk_pkt_out_cnt_fd;
+static int linum_map_fd;
+static int addr_map_fd;
+static int tp_map_fd;
+static int sk_map_fd;
+
+static __u32 addr_srv_idx = ADDR_SRV_IDX;
+static __u32 addr_cli_idx = ADDR_CLI_IDX;
+
+static __u32 egress_srv_idx = EGRESS_SRV_IDX;
+static __u32 egress_cli_idx = EGRESS_CLI_IDX;
+static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
+
+static void init_loopback6(struct sockaddr_in6 *sa6)
+{
+ memset(sa6, 0, sizeof(*sa6));
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_addr = in6addr_loopback;
+}
+
+static void print_sk(const struct bpf_sock *sk)
+{
+ char src_ip4[24], dst_ip4[24];
+ char src_ip6[64], dst_ip6[64];
+
+ inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
+ inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
+ inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
+ inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
+
+ printf("state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
+ "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
+ "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
+ sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
+ sk->mark, sk->priority,
+ sk->src_ip4, src_ip4,
+ sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
+ src_ip6, sk->src_port,
+ sk->dst_ip4, dst_ip4,
+ sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
+ dst_ip6, ntohs(sk->dst_port));
+}
+
+static void print_tp(const struct bpf_tcp_sock *tp)
+{
+ printf("snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
+	       "snd_nxt:%u snd_una:%u mss_cache:%u ecn_flags:%u "
+ "rate_delivered:%u rate_interval_us:%u packets_out:%u "
+ "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
+ "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
+ "bytes_received:%llu bytes_acked:%llu\n",
+ tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
+ tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
+ tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
+ tp->packets_out, tp->retrans_out, tp->total_retrans,
+ tp->segs_in, tp->data_segs_in, tp->segs_out,
+ tp->data_segs_out, tp->lost_out, tp->sacked_out,
+ tp->bytes_received, tp->bytes_acked);
+}
+
+static void check_result(void)
+{
+ struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+ struct bpf_sock srv_sk, cli_sk, listen_sk;
+ __u32 ingress_linum, egress_linum;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+ &egress_linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+ &ingress_linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
+ "err:%d errno:%d", err, errno);
+
+ err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
+ CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
+ "err:%d errno:%d", err, errno);
+ err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
+ CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
+ "err:%d errno:%d", err, errno);
+
+ printf("listen_sk: ");
+ print_sk(&listen_sk);
+ printf("\n");
+
+ printf("srv_sk: ");
+ print_sk(&srv_sk);
+ printf("\n");
+
+ printf("cli_sk: ");
+ print_sk(&cli_sk);
+ printf("\n");
+
+ printf("listen_tp: ");
+ print_tp(&listen_tp);
+ printf("\n");
+
+ printf("srv_tp: ");
+ print_tp(&srv_tp);
+ printf("\n");
+
+ printf("cli_tp: ");
+ print_tp(&cli_tp);
+ printf("\n");
+
+ CHECK(listen_sk.state != 10 ||
+ listen_sk.family != AF_INET6 ||
+ listen_sk.protocol != IPPROTO_TCP ||
+ memcmp(listen_sk.src_ip6, &in6addr_loopback,
+ sizeof(listen_sk.src_ip6)) ||
+ listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+ listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+ listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ listen_sk.dst_port,
+ "Unexpected listen_sk",
+ "Check listen_sk output. ingress_linum:%u",
+ ingress_linum);
+
+ CHECK(srv_sk.state == 10 ||
+ !srv_sk.state ||
+ srv_sk.family != AF_INET6 ||
+ srv_sk.protocol != IPPROTO_TCP ||
+ memcmp(srv_sk.src_ip6, &in6addr_loopback,
+ sizeof(srv_sk.src_ip6)) ||
+ memcmp(srv_sk.dst_ip6, &in6addr_loopback,
+ sizeof(srv_sk.dst_ip6)) ||
+ srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ srv_sk.dst_port != cli_sa6.sin6_port,
+ "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
+ egress_linum);
+
+ CHECK(cli_sk.state == 10 ||
+ !cli_sk.state ||
+ cli_sk.family != AF_INET6 ||
+ cli_sk.protocol != IPPROTO_TCP ||
+ memcmp(cli_sk.src_ip6, &in6addr_loopback,
+ sizeof(cli_sk.src_ip6)) ||
+ memcmp(cli_sk.dst_ip6, &in6addr_loopback,
+ sizeof(cli_sk.dst_ip6)) ||
+ cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
+ cli_sk.dst_port != srv_sa6.sin6_port,
+ "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
+ egress_linum);
+
+ CHECK(listen_tp.data_segs_out ||
+ listen_tp.data_segs_in ||
+ listen_tp.total_retrans ||
+ listen_tp.bytes_acked,
+ "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
+ ingress_linum);
+
+ CHECK(srv_tp.data_segs_out != 2 ||
+ srv_tp.data_segs_in ||
+ srv_tp.snd_cwnd != 10 ||
+ srv_tp.total_retrans ||
+ srv_tp.bytes_acked != 2 * DATA_LEN,
+ "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
+ egress_linum);
+
+ CHECK(cli_tp.data_segs_out ||
+ cli_tp.data_segs_in != 2 ||
+ cli_tp.snd_cwnd != 10 ||
+ cli_tp.total_retrans ||
+ cli_tp.bytes_received != 2 * DATA_LEN,
+ "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
+ egress_linum);
+}
+
+static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd)
+{
+ struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {};
+ int err;
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd,
+ &pkt_out_cnt10);
+
+	/* The bpf prog only counts for a fullsock, and the
+	 * passive connection does not become a fullsock until the
+	 * 3WHS has finished.
+	 * The bpf prog therefore only counted two outgoing data
+	 * packets, but init_sk_storage() deliberately seeds
+	 * accept_fd's pkt_out_cnt with 2. Hence, 4 here.
+ */
+ CHECK(err || pkt_out_cnt.cnt != 4 || pkt_out_cnt10.cnt != 40,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd,
+ &pkt_out_cnt10);
+ /* Active connection is fullsock from the beginning.
+	 * 1 SYN and 1 ACK during the 3WHS,
+	 * 2 ACKs on the data packets: 4 packets out in total.
+ *
+ * The bpf_prog initialized it to 0xeB9F.
+ */
+ CHECK(err || pkt_out_cnt.cnt != 0xeB9F + 4 ||
+ pkt_out_cnt10.cnt != 0xeB9F + 40,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+}
+
+static void init_sk_storage(int sk_fd, __u32 pkt_out_cnt)
+{
+ struct bpf_spinlock_cnt scnt = {};
+ int err;
+
+ scnt.cnt = pkt_out_cnt;
+ err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)",
+ "err:%d errno:%d", err, errno);
+
+ scnt.cnt *= 10;
+ err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)",
+ "err:%d errno:%d", err, errno);
+}
+
+static void test(void)
+{
+ int listen_fd, cli_fd, accept_fd, epfd, err;
+ struct epoll_event ev;
+ socklen_t addrlen;
+ int i;
+
+ addrlen = sizeof(struct sockaddr_in6);
+ ev.events = EPOLLIN;
+
+ epfd = epoll_create(1);
+ CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);
+
+ /* Prepare listen_fd */
+ listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
+ listen_fd, errno);
+
+ init_loopback6(&srv_sa6);
+ err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
+ CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);
+
+ err = listen(listen_fd, 1);
+ CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);
+
+ /* Prepare cli_fd */
+ cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+ CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);
+
+ init_loopback6(&cli_sa6);
+ err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
+ CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);
+
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
+ CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Update addr_map with srv_sa6 and cli_sa6 */
+ err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
+ CHECK(err, "map_update", "err:%d errno:%d", err, errno);
+
+ /* Connect from cli_sa6 to srv_sa6 */
+ err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
+ printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
+ ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
+ CHECK(err && errno != EINPROGRESS,
+ "connect(cli_fd)", "err:%d errno:%d", err, errno);
+
+ ev.data.fd = listen_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Accept the connection */
+ /* Have some timeout in accept(listen_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != listen_fd,
+ "epoll_wait(listen_fd)",
+ "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
+ err, errno, ev.data.fd, listen_fd);
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
+ accept_fd, errno);
+ close(listen_fd);
+
+ ev.data.fd = cli_fd;
+ err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
+ CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
+ err, errno);
+
+ init_sk_storage(accept_fd, 2);
+
+ for (i = 0; i < 2; i++) {
+ /* Send some data from accept_fd to cli_fd */
+ err = send(accept_fd, DATA, DATA_LEN, 0);
+ CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
+ err, errno);
+
+ /* Have some timeout in recv(cli_fd). Just in case. */
+ err = epoll_wait(epfd, &ev, 1, 1000);
+ CHECK(err != 1 || ev.data.fd != cli_fd,
+ "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
+ err, errno, ev.data.fd, cli_fd);
+
+ err = recv(cli_fd, NULL, 0, MSG_TRUNC);
+ CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
+ }
+
+ check_sk_pkt_out_cnt(accept_fd, cli_fd);
+
+ close(epfd);
+ close(accept_fd);
+ close(cli_fd);
+
+ check_result();
+}
+
+int main(int argc, char **argv)
+{
+ struct bpf_prog_load_attr attr = {
+ .file = "test_sock_fields_kern.o",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ };
+ int cgroup_fd, egress_fd, ingress_fd, err;
+ struct bpf_program *ingress_prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+
+ err = setup_cgroup_environment();
+ CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d",
+ err, errno);
+
+ atexit(cleanup_cgroup_environment);
+
+ /* Create a cgroup, get fd, and join it */
+ cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
+ CHECK(cgroup_fd == -1, "create_and_get_cgroup()",
+ "cgroup_fd:%d errno:%d", cgroup_fd, errno);
+
+ err = join_cgroup(TEST_CGROUP);
+ CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
+
+ err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
+ CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
+
+ ingress_prog = bpf_object__find_program_by_title(obj,
+ "cgroup_skb/ingress");
+ CHECK(!ingress_prog,
+ "bpf_object__find_program_by_title(cgroup_skb/ingress)",
+ "not found");
+ ingress_fd = bpf_program__fd(ingress_prog);
+
+ err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+ CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
+ "err:%d errno%d", err, errno);
+
+ err = bpf_prog_attach(ingress_fd, cgroup_fd,
+ BPF_CGROUP_INET_INGRESS, 0);
+ CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_INGRESS)",
+ "err:%d errno%d", err, errno);
+ close(cgroup_fd);
+
+ map = bpf_object__find_map_by_name(obj, "addr_map");
+ CHECK(!map, "cannot find addr_map", "(null)");
+ addr_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sock_result_map");
+ CHECK(!map, "cannot find sock_result_map", "(null)");
+ sk_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map");
+ CHECK(!map, "cannot find tcp_sock_result_map", "(null)");
+ tp_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "linum_map");
+ CHECK(!map, "cannot find linum_map", "(null)");
+ linum_map_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt");
+ CHECK(!map, "cannot find sk_pkt_out_cnt", "(null)");
+ sk_pkt_out_cnt_fd = bpf_map__fd(map);
+
+ map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt10");
+ CHECK(!map, "cannot find sk_pkt_out_cnt10", "(null)");
+ sk_pkt_out_cnt10_fd = bpf_map__fd(map);
+
+ test();
+
+ bpf_object__close(obj);
+ cleanup_cgroup_environment();
+
+ printf("PASS\n");
+
+ return 0;
+}
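
The user-space half above only reads the two sk_storage maps back by socket
fd; the BPF half that fills them (test_sock_fields_kern.o, loaded through
bpf_prog_load_xattr() in main()) is not part of this hunk. As a rough sketch
of that counterpart - assuming the usual SEC()/bpf_helpers.h conventions, the
bpf_sk_storage_get() helper and a struct bpf_spinlock_cnt layout shared with
user space, and leaving out the spin-lock handling the real program needs -
an egress counter could look like:

/* Sketch only, not the actual test_sock_fields_kern.c. */
SEC("cgroup_skb/egress")
int egress_pkt_cnt(struct __sk_buff *skb)
{
	struct bpf_spinlock_cnt *cnt;
	struct bpf_sock *sk = skb->sk;

	if (!sk)
		return 1;	/* allow */

	/* Get (or create) this socket's slot in the sk_storage map. */
	cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (cnt)
		cnt->cnt++;	/* the real program takes cnt->lock first */

	return 1;	/* allow the packet */
}

User space then reads the counter back with bpf_map_lookup_elem() keyed by
the socket fd, which is exactly what check_sk_pkt_out_cnt() above does.
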
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index fc7832ee566b..e51d63786ff8 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,10 +158,8 @@ static int run_test(int cgfd)
bpf_object__for_each_program(prog, pobj) {
prog_name = bpf_program__title(prog, /*needs_copy*/ false);
- if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
- log_err("Unexpected prog: %s", prog_name);
+ if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
- }
err = bpf_prog_attach(bpf_program__fd(prog), cgfd, attach_type,
BPF_F_ALLOW_OVERRIDE);
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index e85a771f607b..3845144e2c91 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <sys/ioctl.h>
#include <stdbool.h>
#include <signal.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
new file mode 100644
index 000000000000..a3bebd7c68dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -0,0 +1,1567 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <linux/filter.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_rlimit.h"
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+
+#define CG_PATH "/foo"
+#define MAX_INSNS 512
+#define FIXUP_SYSCTL_VALUE 0
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
+struct sysctl_test {
+ const char *descr;
+ size_t fixup_value_insn;
+ struct bpf_insn insns[MAX_INSNS];
+ const char *prog_file;
+ enum bpf_attach_type attach_type;
+ const char *sysctl;
+ int open_flags;
+ const char *newval;
+ const char *oldval;
+ enum {
+ LOAD_REJECT,
+ ATTACH_REJECT,
+ OP_EPERM,
+ SUCCESS,
+ } result;
+};
+
+static struct sysctl_test tests[] = {
+ {
+ .descr = "sysctl wrong attach_type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = 0,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = ATTACH_REJECT,
+ },
+ {
+ .descr = "sysctl:read allow all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl:read deny all",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "ctx:write sysctl:read read ok",
+ .insns = {
+ /* If (write) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 1, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "ctx:write sysctl:write read ok",
+ .insns = {
+ /* If (write) */
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, write)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 1, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/domainname",
+ .open_flags = O_WRONLY,
+ .newval = "(none)", /* same as default, should fail anyway */
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "ctx:write sysctl:read write reject",
+ .insns = {
+ /* write = X */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct bpf_sysctl, write)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = LOAD_REJECT,
+ },
+ {
+ .descr = "ctx:file_pos sysctl:read read ok",
+ .insns = {
+ /* If (file_pos == X) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, file_pos)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "ctx:file_pos sysctl:read read ok narrow",
+ .insns = {
+ /* If (file_pos == X) */
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
+ offsetof(struct bpf_sysctl, file_pos)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "ctx:file_pos sysctl:read write ok",
+ .insns = {
+ /* file_pos = X */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct bpf_sysctl, file_pos)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .oldval = "nux\n",
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_name sysctl_value:base ok",
+ .insns = {
+ /* sysctl_get_name arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_name arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* sysctl_get_name arg4 (flags) */
+ BPF_MOV64_IMM(BPF_REG_4, BPF_F_SYSCTL_BASE_NAME),
+
+ /* sysctl_get_name(ctx, buf, buf_len, flags) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, sizeof("tcp_mem") - 1, 6),
+ /* buf == "tcp_mem\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x006d656d5f706374ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_name sysctl_value:base E2BIG truncated",
+ .insns = {
+ /* sysctl_get_name arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_name arg3 (buf_len) too small */
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+
+ /* sysctl_get_name arg4 (flags) */
+ BPF_MOV64_IMM(BPF_REG_4, BPF_F_SYSCTL_BASE_NAME),
+
+ /* sysctl_get_name(ctx, buf, buf_len, flags) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
+
+ /* buf[0:7] == "tcp_me\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x00656d5f706374ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_name sysctl:full ok",
+ .insns = {
+ /* sysctl_get_name arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_name arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 17),
+
+ /* sysctl_get_name arg4 (flags) */
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+
+ /* sysctl_get_name(ctx, buf, buf_len, flags) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 16, 14),
+
+ /* buf[0:8] == "net/ipv4" && */
+ BPF_LD_IMM64(BPF_REG_8, 0x347670692f74656eULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10),
+
+ /* buf[8:16] == "/tcp_mem" && */
+ BPF_LD_IMM64(BPF_REG_8, 0x6d656d5f7063742fULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
+
+ /* buf[16:24] == "\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x0ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 16),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_name sysctl:full E2BIG truncated",
+ .insns = {
+ /* sysctl_get_name arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_name arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 16),
+
+ /* sysctl_get_name arg4 (flags) */
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+
+ /* sysctl_get_name(ctx, buf, buf_len, flags) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 10),
+
+ /* buf[0:8] == "net/ipv4" && */
+ BPF_LD_IMM64(BPF_REG_8, 0x347670692f74656eULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
+
+ /* buf[8:16] == "/tcp_me\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x00656d5f7063742fULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_name sysctl:full E2BIG truncated small",
+ .insns = {
+ /* sysctl_get_name arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_name arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+
+ /* sysctl_get_name arg4 (flags) */
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+
+ /* sysctl_get_name(ctx, buf, buf_len, flags) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_name),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
+
+ /* buf[0:8] == "net/ip\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x000070692f74656eULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_current_value sysctl:read ok, gt",
+ .insns = {
+ /* sysctl_get_current_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_current_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* sysctl_get_current_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6),
+
+ /* buf[0:6] == "Linux\n\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x000a78756e694cULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_current_value sysctl:read ok, eq",
+ .insns = {
+ /* sysctl_get_current_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_7, BPF_REG_0, 7),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_current_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+
+ /* sysctl_get_current_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6),
+
+ /* buf[0:6] == "Linux\n\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x000a78756e694cULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_current_value sysctl:read E2BIG truncated",
+ .insns = {
+ /* sysctl_get_current_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_7, BPF_REG_0, 6),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_current_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 6),
+
+ /* sysctl_get_current_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6),
+
+ /* buf[0:6] == "Linux\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x000078756e694cULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "kernel/ostype",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_current_value sysctl:read EINVAL",
+ .insns = {
+ /* sysctl_get_current_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_current_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* sysctl_get_current_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 4),
+
+ /* buf[0:8] is NUL-filled) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv6/conf/lo/stable_secret", /* -EIO */
+ .open_flags = O_RDONLY,
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "sysctl_get_current_value sysctl:write ok",
+ .fixup_value_insn = 6,
+ .insns = {
+ /* sysctl_get_current_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_current_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* sysctl_get_current_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_current_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 6),
+
+ /* buf[0:4] == expected) */
+ BPF_LD_IMM64(BPF_REG_8, FIXUP_SYSCTL_VALUE),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_WRONLY,
+ .newval = "600", /* same as default, should fail anyway */
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "sysctl_get_new_value sysctl:read EINVAL",
+ .insns = {
+ /* sysctl_get_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* sysctl_get_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_get_new_value sysctl:write ok",
+ .insns = {
+ /* sysctl_get_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 4),
+
+ /* sysctl_get_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
+
+ /* buf[0:4] == "606\0") */
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0x00363036, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_WRONLY,
+ .newval = "606",
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "sysctl_get_new_value sysctl:write ok long",
+ .insns = {
+ /* sysctl_get_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+
+ /* sysctl_get_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 23, 14),
+
+ /* buf[0:8] == "3000000 " && */
+ BPF_LD_IMM64(BPF_REG_8, 0x2030303030303033ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10),
+
+ /* buf[8:16] == "4000000 " && */
+ BPF_LD_IMM64(BPF_REG_8, 0x2030303030303034ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6),
+
+ /* buf[16:24] == "6000000\0") */
+ BPF_LD_IMM64(BPF_REG_8, 0x0030303030303036ULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 16),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_WRONLY,
+ .newval = "3000000 4000000 6000000",
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "sysctl_get_new_value sysctl:write E2BIG",
+ .insns = {
+ /* sysctl_get_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_7, BPF_REG_0, 3),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_get_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 3),
+
+ /* sysctl_get_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_get_new_value),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 4),
+
+ /* buf[0:3] == "60\0") */
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0x003036, 2),
+
+ /* return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+
+ /* else return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_WRONLY,
+ .newval = "606",
+ .result = OP_EPERM,
+ },
+ {
+ .descr = "sysctl_set_new_value sysctl:read EINVAL",
+ .insns = {
+ /* sysctl_set_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_set_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 3),
+
+ /* sysctl_set_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_set_new_value),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ .descr = "sysctl_set_new_value sysctl:write ok",
+ .fixup_value_insn = 2,
+ .insns = {
+ /* sysctl_set_new_value arg2 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, FIXUP_SYSCTL_VALUE),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+
+ /* sysctl_set_new_value arg3 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_3, 3),
+
+ /* sysctl_set_new_value(ctx, buf, buf_len) */
+ BPF_EMIT_CALL(BPF_FUNC_sysctl_set_new_value),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_WRONLY,
+ .newval = "606",
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul one number string",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 600, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul multi number string",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ /* "600 602\0" */
+ BPF_LD_IMM64(BPF_REG_0, 0x0032303620303036ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 18),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 600, 16),
+
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_0),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 4),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 602, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul buf_len = 0, reject",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = LOAD_REJECT,
+ },
+ {
+ "bpf_strtoul supported base, ok",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00373730),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 8),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 63, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul unsupported base, EINVAL",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 3),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul buf with spaces only, EINVAL",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x090a0c0d),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtoul negative number, EINVAL",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00362d0a), /* "\n-6\0" */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -EINVAL, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtol negative number, ok",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00362d0a), /* "\n-6\0" */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 10),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtol),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 3, 4),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, -6, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtol hex number, ok",
+ .insns = {
+ /* arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x65667830), /* "0xfe" */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtol),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, 4),
+ /* res == expected) */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 254, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtol max long",
+ .insns = {
+ /* arg1 (buf) 9223372036854775807 */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
+ BPF_LD_IMM64(BPF_REG_0, 0x3032373333323239ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0x3537373435383633ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
+ BPF_LD_IMM64(BPF_REG_0, 0x0000000000373038ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 19),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtol),
+
+ /* if (ret == expected && */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 19, 6),
+ /* res == expected) */
+ BPF_LD_IMM64(BPF_REG_8, 0x7fffffffffffffffULL),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "bpf_strtol overflow, ERANGE",
+ .insns = {
+ /* arg1 (buf) 9223372036854775808 */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24),
+ BPF_LD_IMM64(BPF_REG_0, 0x3032373333323239ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0x3537373435383633ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8),
+ BPF_LD_IMM64(BPF_REG_0, 0x0000000000383038ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 19),
+
+ /* arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ BPF_EMIT_CALL(BPF_FUNC_strtol),
+
+ /* if (ret == expected) */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -ERANGE, 2),
+
+ /* return ALLOW; */
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_A(1),
+
+ /* else return DENY; */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+ {
+ "C prog: deny all writes",
+ .prog_file = "./test_sysctl_prog.o",
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_WRONLY,
+ .newval = "123 456 789",
+ .result = OP_EPERM,
+ },
+ {
+ "C prog: deny access by name",
+ .prog_file = "./test_sysctl_prog.o",
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/route/mtu_expires",
+ .open_flags = O_RDONLY,
+ .result = OP_EPERM,
+ },
+ {
+ "C prog: read tcp_mem",
+ .prog_file = "./test_sysctl_prog.o",
+ .attach_type = BPF_CGROUP_SYSCTL,
+ .sysctl = "net/ipv4/tcp_mem",
+ .open_flags = O_RDONLY,
+ .result = SUCCESS,
+ },
+};
+
+static size_t probe_prog_length(const struct bpf_insn *fp)
+{
+ size_t len;
+
+ for (len = MAX_INSNS - 1; len > 0; --len)
+ if (fp[len].code != 0 || fp[len].imm != 0)
+ break;
+ return len + 1;
+}
+
+static int fixup_sysctl_value(const char *buf, size_t buf_len,
+ struct bpf_insn *prog, size_t insn_num)
+{
+ uint32_t value_num = 0;
+ uint8_t c, i;
+
+ if (buf_len > sizeof(value_num)) {
+ log_err("Value is too big (%zd) to use in fixup", buf_len);
+ return -1;
+ }
+
+ for (i = 0; i < buf_len; ++i) {
+ c = buf[i];
+ value_num |= (c << i * 8);
+ }
+
+ prog[insn_num].imm = value_num;
+
+ return 0;
+}
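+
+/*
+ * Worked example (illustrative only): for a current value of "600",
+ * fixup_sysctl_value("600", 3, prog, insn_num) packs the bytes
+ * little-endian:
+ *   i=0: '6' (0x36) << 0  -> 0x00000036
+ *   i=1: '0' (0x30) << 8  -> 0x00003036
+ *   i=2: '0' (0x30) << 16 -> 0x00303036
+ * i.e. the same immediate that the hand-written
+ * BPF_MOV64_IMM(BPF_REG_0, 0x00303036) instructions in tests[] use
+ * directly for "600".
+ */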
+
+static int load_sysctl_prog_insns(struct sysctl_test *test,
+ const char *sysctl_path)
+{
+ struct bpf_insn *prog = test->insns;
+ struct bpf_load_program_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(struct bpf_load_program_attr));
+ attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
+ attr.insns = prog;
+ attr.insns_cnt = probe_prog_length(attr.insns);
+ attr.license = "GPL";
+
+ if (test->fixup_value_insn) {
+ char buf[128];
+ ssize_t len;
+ int fd;
+
+ fd = open(sysctl_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ log_err("open(%s) failed", sysctl_path);
+ return -1;
+ }
+ len = read(fd, buf, sizeof(buf));
+ if (len == -1) {
+ log_err("read(%s) failed", sysctl_path);
+ close(fd);
+ return -1;
+ }
+ close(fd);
+ if (fixup_sysctl_value(buf, len, prog, test->fixup_value_insn))
+ return -1;
+ }
+
+ ret = bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE);
+ if (ret < 0 && test->result != LOAD_REJECT) {
+ log_err(">>> Loading program error.\n"
+ ">>> Verifier output:\n%s\n-------\n", bpf_log_buf);
+ }
+
+ return ret;
+}
+
+static int load_sysctl_prog_file(struct sysctl_test *test)
+{
+ struct bpf_prog_load_attr attr;
+ struct bpf_object *obj;
+ int prog_fd;
+
+ memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
+ attr.file = test->prog_file;
+ attr.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL;
+
+ if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) {
+ if (test->result != LOAD_REJECT)
+ log_err(">>> Loading program (%s) error.\n",
+ test->prog_file);
+ return -1;
+ }
+
+ return prog_fd;
+}
+
+static int load_sysctl_prog(struct sysctl_test *test, const char *sysctl_path)
+{
+ return test->prog_file
+ ? load_sysctl_prog_file(test)
+ : load_sysctl_prog_insns(test, sysctl_path);
+}
+
+static int access_sysctl(const char *sysctl_path,
+ const struct sysctl_test *test)
+{
+ int err = 0;
+ int fd;
+
+ fd = open(sysctl_path, test->open_flags | O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ if (test->open_flags == O_RDONLY) {
+ char buf[128];
+
+ if (read(fd, buf, sizeof(buf)) == -1)
+ goto err;
+ if (test->oldval &&
+ strncmp(buf, test->oldval, strlen(test->oldval))) {
+ log_err("Read value %s != %s", buf, test->oldval);
+ goto err;
+ }
+ } else if (test->open_flags == O_WRONLY) {
+ if (!test->newval) {
+ log_err("New value for sysctl is not set");
+ goto err;
+ }
+ if (write(fd, test->newval, strlen(test->newval)) == -1)
+ goto err;
+ } else {
+ log_err("Unexpected sysctl access: neither read nor write");
+ goto err;
+ }
+
+ goto out;
+err:
+ err = -1;
+out:
+ close(fd);
+ return err;
+}
+
+static int run_test_case(int cgfd, struct sysctl_test *test)
+{
+ enum bpf_attach_type atype = test->attach_type;
+ char sysctl_path[128];
+ int progfd = -1;
+ int err = 0;
+
+ printf("Test case: %s .. ", test->descr);
+
+ snprintf(sysctl_path, sizeof(sysctl_path), "/proc/sys/%s",
+ test->sysctl);
+
+ progfd = load_sysctl_prog(test, sysctl_path);
+ if (progfd < 0) {
+ if (test->result == LOAD_REJECT)
+ goto out;
+ else
+ goto err;
+ }
+
+ if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) == -1) {
+ if (test->result == ATTACH_REJECT)
+ goto out;
+ else
+ goto err;
+ }
+
+ if (access_sysctl(sysctl_path, test) == -1) {
+ if (test->result == OP_EPERM && errno == EPERM)
+ goto out;
+ else
+ goto err;
+ }
+
+ if (test->result != SUCCESS) {
+ log_err("Unexpected failure");
+ goto err;
+ }
+
+ goto out;
+err:
+ err = -1;
+out:
+ /* Detaching w/o checking return code: best effort attempt. */
+ if (progfd != -1)
+ bpf_prog_detach(cgfd, atype);
+ close(progfd);
+ printf("[%s]\n", err ? "FAIL" : "PASS");
+ return err;
+}
+
+static int run_tests(int cgfd)
+{
+ int passes = 0;
+ int fails = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ if (run_test_case(cgfd, &tests[i]))
+ ++fails;
+ else
+ ++passes;
+ }
+ printf("Summary: %d PASSED, %d FAILED\n", passes, fails);
+ return fails ? -1 : 0;
+}
+
+int main(int argc, char **argv)
+{
+ int cgfd = -1;
+ int err = 0;
+
+ if (setup_cgroup_environment())
+ goto err;
+
+ cgfd = create_and_get_cgroup(CG_PATH);
+ if (cgfd < 0)
+ goto err;
+
+ if (join_cgroup(CG_PATH))
+ goto err;
+
+ if (run_tests(cgfd))
+ goto err;
+
+ goto out;
+err:
+ err = -1;
+out:
+ close(cgfd);
+ cleanup_cgroup_environment();
+ return err;
+}
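
Most of the cases above hand-assemble their programs with BPF_*() macros and
only the last three load test_sysctl_prog.o. For orientation, the write-deny
and current-value/bpf_strtoul patterns those instruction sequences exercise
translate roughly into the restricted C below; this is a sketch under the
usual SEC("cgroup/sysctl") and bpf_helpers.h assumptions, not code taken from
this patch:

SEC("cgroup/sysctl")
int sysctl_sketch(struct bpf_sysctl *ctx)
{
	unsigned long mtu_expires;
	char value[8] = {};
	int ret;

	/* "ctx:write ..." style check: reject any write outright. */
	if (ctx->write)
		return 0;			/* access fails with EPERM */

	/* "sysctl_get_current_value" + "bpf_strtoul" style check. */
	ret = bpf_sysctl_get_current_value(ctx, value, sizeof(value));
	if (ret < 0)
		return 0;

	if (bpf_strtoul(value, sizeof(value), 0, &mtu_expires) < 0)
		return 0;

	return mtu_expires == 600;		/* allow only the default */
}

Returning 1 allows the access and 0 makes the read or write fail with EPERM,
which is what the OP_EPERM cases above assert from user space.
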
diff --git a/tools/testing/selftests/bpf/test_tc_edt.sh b/tools/testing/selftests/bpf/test_tc_edt.sh
new file mode 100755
index 000000000000..f38567ef694b
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tc_edt.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test installs a TC bpf program that throttles a TCP flow
+# with dst port = 9000 down to 5MBps. Then it measures actual
+# throughput of the flow.
+
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+# check that nc, dd, and timeout are present
+command -v nc >/dev/null 2>&1 || \
+ { echo >&2 "nc is not available"; exit 1; }
+command -v dd >/dev/null 2>&1 || \
+ { echo >&2 "nc is not available"; exit 1; }
+command -v timeout >/dev/null 2>&1 || \
+ { echo >&2 "timeout is not available"; exit 1; }
+
+readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
+readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
+
+readonly IP_SRC="172.16.1.100"
+readonly IP_DST="172.16.2.100"
+
+cleanup()
+{
+ ip netns del ${NS_SRC}
+ ip netns del ${NS_DST}
+}
+
+trap cleanup EXIT
+
+set -e # exit on error
+
+ip netns add "${NS_SRC}"
+ip netns add "${NS_DST}"
+ip link add veth_src type veth peer name veth_dst
+ip link set veth_src netns ${NS_SRC}
+ip link set veth_dst netns ${NS_DST}
+
+ip -netns ${NS_SRC} addr add ${IP_SRC}/24 dev veth_src
+ip -netns ${NS_DST} addr add ${IP_DST}/24 dev veth_dst
+
+ip -netns ${NS_SRC} link set dev veth_src up
+ip -netns ${NS_DST} link set dev veth_dst up
+
+ip -netns ${NS_SRC} route add ${IP_DST}/32 dev veth_src
+ip -netns ${NS_DST} route add ${IP_SRC}/32 dev veth_dst
+
+# set up TC on TX
+ip netns exec ${NS_SRC} tc qdisc add dev veth_src root fq
+ip netns exec ${NS_SRC} tc qdisc add dev veth_src clsact
+ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
+ bpf da obj test_tc_edt.o sec cls_test
+
+
+# start the listener
+ip netns exec ${NS_DST} bash -c \
+ "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+declare -i NC_PID=$!
+sleep 1
+
+declare -ir TIMEOUT=20
+declare -ir EXPECTED_BPS=5000000
+
+# run the load, capture RX bytes on DST
+declare -ir RX_BYTES_START=$( ip netns exec ${NS_DST} \
+ cat /sys/class/net/veth_dst/statistics/rx_bytes )
+
+set +e
+ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero \
+ bs=1000 count=1000000 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
+set -e
+
+declare -ir RX_BYTES_END=$( ip netns exec ${NS_DST} \
+ cat /sys/class/net/veth_dst/statistics/rx_bytes )
+
+declare -ir ACTUAL_BPS=$(( ($RX_BYTES_END - $RX_BYTES_START) / $TIMEOUT ))
+
+echo $TIMEOUT $ACTUAL_BPS $EXPECTED_BPS | \
+ awk '{printf "elapsed: %d sec; bps difference: %.2f%%\n",
+ $1, ($2-$3)*100.0/$3}'
+
+# Pass the test if the actual bps is within 1% of the expected bps.
+# The difference is usually about 0.1% on a 20-sec test, and approaches
+# zero the longer the test runs.
+declare -ir RES=$( echo $ACTUAL_BPS $EXPECTED_BPS | \
+ awk 'function abs(x){return ((x < 0.0) ? -x : x)}
+ {if (abs(($1-$2)*100.0/$2) > 1.0) { print "1" }
+ else { print "0"} }' )
+if [ "${RES}" == "0" ] ; then
+ echo "PASS"
+else
+ echo "FAIL"
+ exit 1
+fi
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
new file mode 100755
index 000000000000..ff0d31d38061
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -0,0 +1,290 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# In-place tunneling
+
+# must match the port that the bpf program filters on
+readonly port=8000
+
+readonly ns_prefix="ns-$$-"
+readonly ns1="${ns_prefix}1"
+readonly ns2="${ns_prefix}2"
+
+readonly ns1_v4=192.168.1.1
+readonly ns2_v4=192.168.1.2
+readonly ns1_v6=fd::1
+readonly ns2_v6=fd::2
+
+# Must match port used by bpf program
+readonly udpport=5555
+# MPLSoverUDP
+readonly mplsudpport=6635
+readonly mplsproto=137
+
+readonly infile="$(mktemp)"
+readonly outfile="$(mktemp)"
+
+setup() {
+ ip netns add "${ns1}"
+ ip netns add "${ns2}"
+
+ ip link add dev veth1 mtu 1500 netns "${ns1}" type veth \
+ peer name veth2 mtu 1500 netns "${ns2}"
+
+ ip netns exec "${ns1}" ethtool -K veth1 tso off
+
+ ip -netns "${ns1}" link set veth1 up
+ ip -netns "${ns2}" link set veth2 up
+
+ ip -netns "${ns1}" -4 addr add "${ns1_v4}/24" dev veth1
+ ip -netns "${ns2}" -4 addr add "${ns2_v4}/24" dev veth2
+ ip -netns "${ns1}" -6 addr add "${ns1_v6}/64" dev veth1 nodad
+ ip -netns "${ns2}" -6 addr add "${ns2_v6}/64" dev veth2 nodad
+
+ # clamp route to reserve room for tunnel headers
+ ip -netns "${ns1}" -4 route flush table main
+ ip -netns "${ns1}" -6 route flush table main
+ ip -netns "${ns1}" -4 route add "${ns2_v4}" mtu 1458 dev veth1
+ ip -netns "${ns1}" -6 route add "${ns2_v6}" mtu 1438 dev veth1
+
+ sleep 1
+
+ dd if=/dev/urandom of="${infile}" bs="${datalen}" count=1 status=none
+}
+
+cleanup() {
+ ip netns del "${ns2}"
+ ip netns del "${ns1}"
+
+ if [[ -f "${outfile}" ]]; then
+ rm "${outfile}"
+ fi
+ if [[ -f "${infile}" ]]; then
+ rm "${infile}"
+ fi
+}
+
+server_listen() {
+ ip netns exec "${ns2}" nc "${netcat_opt}" -l -p "${port}" > "${outfile}" &
+ server_pid=$!
+ sleep 0.2
+}
+
+client_connect() {
+ ip netns exec "${ns1}" timeout 2 nc "${netcat_opt}" -w 1 "${addr2}" "${port}" < "${infile}"
+ echo $?
+}
+
+verify_data() {
+ wait "${server_pid}"
+ # sha1sum returns two fields [sha1] [filepath]
+ # convert to bash array and access first elem
+ insum=($(sha1sum ${infile}))
+ outsum=($(sha1sum ${outfile}))
+ if [[ "${insum[0]}" != "${outsum[0]}" ]]; then
+ echo "data mismatch"
+ exit 1
+ fi
+}
+
+set -e
+
+# no arguments: automated test, run all
+if [[ "$#" -eq "0" ]]; then
+ echo "ipip"
+ $0 ipv4 ipip none 100
+
+ echo "ip6ip6"
+ $0 ipv6 ip6tnl none 100
+
+ echo "sit"
+ $0 ipv6 sit none 100
+
+ for mac in none mpls eth ; do
+ echo "ip gre $mac"
+ $0 ipv4 gre $mac 100
+
+ echo "ip6 gre $mac"
+ $0 ipv6 ip6gre $mac 100
+
+ echo "ip gre $mac gso"
+ $0 ipv4 gre $mac 2000
+
+ echo "ip6 gre $mac gso"
+ $0 ipv6 ip6gre $mac 2000
+
+ echo "ip udp $mac"
+ $0 ipv4 udp $mac 100
+
+ echo "ip6 udp $mac"
+ $0 ipv6 ip6udp $mac 100
+
+ echo "ip udp $mac gso"
+ $0 ipv4 udp $mac 2000
+
+ echo "ip6 udp $mac gso"
+ $0 ipv6 ip6udp $mac 2000
+ done
+
+ echo "OK. All tests passed"
+ exit 0
+fi
+
+if [[ "$#" -ne "4" ]]; then
+ echo "Usage: $0"
+ echo " or: $0 <ipv4|ipv6> <tuntype> <none|mpls|eth> <data_len>"
+ exit 1
+fi
+
+case "$1" in
+"ipv4")
+ readonly addr1="${ns1_v4}"
+ readonly addr2="${ns2_v4}"
+ readonly ipproto=4
+ readonly netcat_opt=-${ipproto}
+ readonly foumod=fou
+ readonly foutype=ipip
+ readonly fouproto=4
+ readonly fouproto_mpls=${mplsproto}
+ readonly gretaptype=gretap
+ ;;
+"ipv6")
+ readonly addr1="${ns1_v6}"
+ readonly addr2="${ns2_v6}"
+ readonly ipproto=6
+ readonly netcat_opt=-${ipproto}
+ readonly foumod=fou6
+ readonly foutype=ip6tnl
+ readonly fouproto="41 -6"
+ readonly fouproto_mpls="${mplsproto} -6"
+ readonly gretaptype=ip6gretap
+ ;;
+*)
+ echo "unknown arg: $1"
+ exit 1
+ ;;
+esac
+
+readonly tuntype=$2
+readonly mac=$3
+readonly datalen=$4
+
+echo "encap ${addr1} to ${addr2}, type ${tuntype}, mac ${mac} len ${datalen}"
+
+trap cleanup EXIT
+
+setup
+
+# basic communication works
+echo "test basic connectivity"
+server_listen
+client_connect
+verify_data
+
+# clientside, insert bpf program to encap all TCP to port ${port}
+# client can no longer connect
+ip netns exec "${ns1}" tc qdisc add dev veth1 clsact
+ip netns exec "${ns1}" tc filter add dev veth1 egress \
+ bpf direct-action object-file ./test_tc_tunnel.o \
+ section "encap_${tuntype}_${mac}"
+echo "test bpf encap without decap (expect failure)"
+server_listen
+! client_connect
+
+if [[ "$tuntype" =~ "udp" ]]; then
+ # Set up fou tunnel.
+ ttype="${foutype}"
+ targs="encap fou encap-sport auto encap-dport $udpport"
+ # fou may be a module; allow this to fail.
+ modprobe "${foumod}" ||true
+ if [[ "$mac" == "mpls" ]]; then
+ dport=${mplsudpport}
+ dproto=${fouproto_mpls}
+ tmode="mode any ttl 255"
+ else
+ dport=${udpport}
+ dproto=${fouproto}
+ fi
+ ip netns exec "${ns2}" ip fou add port $dport ipproto ${dproto}
+ targs="encap fou encap-sport auto encap-dport $dport"
+elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
+ ttype=$gretaptype
+else
+ ttype=$tuntype
+ targs=""
+fi
+
+# tunnel address family differs from inner for SIT
+if [[ "${tuntype}" == "sit" ]]; then
+ link_addr1="${ns1_v4}"
+ link_addr2="${ns2_v4}"
+else
+ link_addr1="${addr1}"
+ link_addr2="${addr2}"
+fi
+
+# serverside, insert decap module
+# server is still running
+# client can connect again
+ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \
+ ${tmode} remote "${link_addr1}" local "${link_addr2}" $targs
+
+expect_tun_fail=0
+
+if [[ "$tuntype" == "ip6udp" && "$mac" == "mpls" ]]; then
+ # No support for MPLS IPv6 fou tunnel; expect failure.
+ expect_tun_fail=1
+elif [[ "$tuntype" =~ "udp" && "$mac" == "eth" ]]; then
+ # No support for TEB fou tunnel; expect failure.
+ expect_tun_fail=1
+elif [[ "$tuntype" =~ "gre" && "$mac" == "eth" ]]; then
+ # Share ethernet address between tunnel/veth2 so L2 decap works.
+ ethaddr=$(ip netns exec "${ns2}" ip link show veth2 | \
+ awk '/ether/ { print $2 }')
+ ip netns exec "${ns2}" ip link set testtun0 address $ethaddr
+elif [[ "$mac" == "mpls" ]]; then
+ modprobe mpls_iptunnel ||true
+ modprobe mpls_gso ||true
+ ip netns exec "${ns2}" sysctl -qw net.mpls.platform_labels=65536
+ ip netns exec "${ns2}" ip -f mpls route add 1000 dev lo
+ ip netns exec "${ns2}" ip link set lo up
+ ip netns exec "${ns2}" sysctl -qw net.mpls.conf.testtun0.input=1
+ ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.lo.rp_filter=0
+fi
+
+# Because packets are decapped by the tunnel, they arrive on testtun0 from
+# the IP stack's perspective. Ensure reverse path filtering is disabled,
+# otherwise the TCP SYN is dropped as arriving on testtun0 instead of the
+# expected veth2 (veth2 is where 192.168.1.2 is configured).
+ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0
+# rp_filter needs to be disabled for both "all" and testtun0, as the
+# effective value is the max of the "all" and device-specific settings.
+ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0
+ip netns exec "${ns2}" ip link set dev testtun0 up
+if [[ "$expect_tun_fail" == 1 ]]; then
+ # This tunnel mode is not supported, so we expect failure.
+ echo "test bpf encap with tunnel device decap (expect failure)"
+ ! client_connect
+else
+ echo "test bpf encap with tunnel device decap"
+ client_connect
+ verify_data
+ server_listen
+fi
+
+# bpf_skb_net_shrink does not take tunnel flags yet, cannot update L3.
+if [[ "${tuntype}" == "sit" ]]; then
+ echo OK
+ exit 0
+fi
+
+# serverside, use BPF for decap
+ip netns exec "${ns2}" ip link del dev testtun0
+ip netns exec "${ns2}" tc qdisc add dev veth2 clsact
+ip netns exec "${ns2}" tc filter add dev veth2 ingress \
+ bpf direct-action object-file ./test_tc_tunnel.o section decap
+echo "test bpf encap with bpf decap"
+client_connect
+verify_data
+
+echo OK
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
new file mode 100755
index 000000000000..d48e51716d19
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Facebook
+# Copyright (c) 2019 Cloudflare
+
+set -eu
+
+wait_for_ip()
+{
+ local _i
+ printf "Wait for IP %s to become available " "$1"
+ for _i in $(seq ${MAX_PING_TRIES}); do
+ printf "."
+ if ns1_exec ping -c 1 -W 1 "$1" >/dev/null 2>&1; then
+ echo " OK"
+ return
+ fi
+ sleep 1
+ done
+ echo 1>&2 "ERROR: Timeout waiting for test IP to become available."
+ exit 1
+}
+
+get_prog_id()
+{
+ awk '/ id / {sub(/.* id /, "", $0); print($1)}'
+}
+
+ns1_exec()
+{
+ ip netns exec ns1 "$@"
+}
+
+setup()
+{
+ ip netns add ns1
+ ns1_exec ip link set lo up
+
+ ns1_exec sysctl -w net.ipv4.tcp_syncookies=2
+
+ wait_for_ip 127.0.0.1
+ wait_for_ip ::1
+}
+
+cleanup()
+{
+ ip netns del ns1 2>/dev/null || :
+}
+
+main()
+{
+ trap cleanup EXIT 2 3 6 15
+ setup
+
+ printf "Testing clsact..."
+ ns1_exec tc qdisc add dev "${TEST_IF}" clsact
+ ns1_exec tc filter add dev "${TEST_IF}" ingress \
+ bpf obj "${BPF_PROG_OBJ}" sec "${CLSACT_SECTION}" da
+
+ BPF_PROG_ID=$(ns1_exec tc filter show dev "${TEST_IF}" ingress | \
+ get_prog_id)
+ ns1_exec "${PROG}" "${BPF_PROG_ID}"
+ ns1_exec tc qdisc del dev "${TEST_IF}" clsact
+
+ printf "Testing XDP..."
+ ns1_exec ip link set "${TEST_IF}" xdp \
+ object "${BPF_PROG_OBJ}" section "${XDP_SECTION}"
+ BPF_PROG_ID=$(ns1_exec ip link show "${TEST_IF}" | get_prog_id)
+ ns1_exec "${PROG}" "${BPF_PROG_ID}"
+}
+
+DIR=$(dirname $0)
+TEST_IF=lo
+MAX_PING_TRIES=5
+BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o"
+CLSACT_SECTION="clsact/check_syncookie"
+XDP_SECTION="xdp/check_syncookie"
+BPF_PROG_ID=0
+PROG="${DIR}/test_tcp_check_syncookie_user"
+
+main
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
new file mode 100644
index 000000000000..87829c86c746
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+// Copyright (c) 2019 Cloudflare
+
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_rlimit.h"
+#include "cgroup_helpers.h"
+
+static int start_server(const struct sockaddr *addr, socklen_t len)
+{
+ int fd;
+
+ fd = socket(addr->sa_family, SOCK_STREAM, 0);
+ if (fd == -1) {
+ log_err("Failed to create server socket");
+ goto out;
+ }
+
+ if (bind(fd, addr, len) == -1) {
+ log_err("Failed to bind server socket");
+ goto close_out;
+ }
+
+ if (listen(fd, 128) == -1) {
+ log_err("Failed to listen on server socket");
+ goto close_out;
+ }
+
+ goto out;
+
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static int connect_to_server(int server_fd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = sizeof(addr);
+ int fd = -1;
+
+ if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
+ log_err("Failed to get server addr");
+ goto out;
+ }
+
+ fd = socket(addr.ss_family, SOCK_STREAM, 0);
+ if (fd == -1) {
+ log_err("Failed to create client socket");
+ goto out;
+ }
+
+ if (connect(fd, (const struct sockaddr *)&addr, len) == -1) {
+ log_err("Fail to connect to server");
+ goto close_out;
+ }
+
+ goto out;
+
+close_out:
+ close(fd);
+ fd = -1;
+out:
+ return fd;
+}
+
+static int get_map_fd_by_prog_id(int prog_id)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 map_ids[1];
+ int prog_fd = -1;
+ int map_fd = -1;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0) {
+ log_err("Failed to get fd by prog id %d", prog_id);
+ goto err;
+ }
+
+ info.nr_map_ids = 1;
+ info.map_ids = (__u64)(unsigned long)map_ids;
+
+ if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ log_err("Failed to get info by prog fd %d", prog_fd);
+ goto err;
+ }
+
+ if (!info.nr_map_ids) {
+ log_err("No maps found for prog fd %d", prog_fd);
+ goto err;
+ }
+
+ map_fd = bpf_map_get_fd_by_id(map_ids[0]);
+ if (map_fd < 0)
+ log_err("Failed to get fd by map id %d", map_ids[0]);
+err:
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return map_fd;
+}
+
+static int run_test(int server_fd, int results_fd)
+{
+ int client = -1, srv_client = -1;
+ int ret = 0;
+ __u32 key = 0;
+ __u64 value = 0;
+
+ if (bpf_map_update_elem(results_fd, &key, &value, 0) < 0) {
+ log_err("Can't clear results");
+ goto err;
+ }
+
+ client = connect_to_server(server_fd);
+ if (client == -1)
+ goto err;
+
+ srv_client = accept(server_fd, NULL, 0);
+ if (srv_client == -1) {
+ log_err("Can't accept connection");
+ goto err;
+ }
+
+ if (bpf_map_lookup_elem(results_fd, &key, &value) < 0) {
+ log_err("Can't lookup result");
+ goto err;
+ }
+
+ if (value != 1) {
+ log_err("Didn't match syncookie: %llu", value);
+ goto err;
+ }
+
+ goto out;
+
+err:
+ ret = 1;
+out:
+ close(client);
+ close(srv_client);
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+ int server = -1;
+ int server_v6 = -1;
+ int results = -1;
+ int err = 0;
+
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s prog_id\n", argv[0]);
+ exit(1);
+ }
+
+ results = get_map_fd_by_prog_id(atoi(argv[1]));
+ if (results < 0) {
+ log_err("Can't get map");
+ goto err;
+ }
+
+ memset(&addr4, 0, sizeof(addr4));
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ addr4.sin_port = 0;
+
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ addr6.sin6_addr = in6addr_loopback;
+ addr6.sin6_port = 0;
+
+ server = start_server((const struct sockaddr *)&addr4, sizeof(addr4));
+ if (server == -1)
+ goto err;
+
+ server_v6 = start_server((const struct sockaddr *)&addr6,
+ sizeof(addr6));
+ if (server_v6 == -1)
+ goto err;
+
+ if (run_test(server, results))
+ goto err;
+
+ if (run_test(server_v6, results))
+ goto err;
+
+ printf("ok\n");
+ goto out;
+err:
+ err = 1;
+out:
+ close(server);
+ close(server_v6);
+ close(results);
+ return err;
+}
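test_tcp_check_syncookie_user.c walks from a program id to the program's first map (bpf_prog_get_fd_by_id, then bpf_obj_get_info_by_fd, then bpf_map_get_fd_by_id) and reads key 0 to see whether the BPF program observed a valid syncookie. The same chain can be followed by hand with bpftool when debugging; this is only an illustration, bpftool is not used by the patch, and the ids below are placeholders.

# Sketch: inspect the result map of a loaded check_syncookie program by hand.
bpftool prog show id 42          # the "map_ids" field names the program's map(s)
bpftool map dump id 13           # key 0 holds the 0/1 result the C code checks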
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4e4353711a86..86152d9ae95b 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -148,17 +148,17 @@ int main(int argc, char **argv)
pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
sprintf(test_script,
- "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+ "iptables -A INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+ "nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
TESTPORT);
system(test_script);
sprintf(test_script,
- "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+ "iptables -D INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
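The test_tcpnotify_user.c hunk above stops hard-coding /usr/sbin and /usr/bin so that iptables and nc are resolved through PATH on distributions that install them elsewhere. A harness that wants to fail early when they are missing can add a pre-flight check along these lines (a sketch, not part of the patch):

# Sketch: confirm the helpers the test shells out to are resolvable via PATH.
for tool in iptables nc; do
    command -v "$tool" >/dev/null 2>&1 || { echo "missing $tool" >&2; exit 1; }
done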
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2fd90d456892..288cb740e005 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Testsuite for eBPF verifier
*
* Copyright (c) 2014 PLUMgrid, http://plumgrid.com
* Copyright (c) 2017 Facebook
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
*/
#include <endian.h>
@@ -32,8 +29,10 @@
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
+#include <linux/btf.h>
#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#ifdef HAVE_GENHDR
# include "autoconf.h"
@@ -45,11 +44,13 @@
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
+#include "test_btf.h"
#include "../../../include/linux/filter.h"
#define MAX_INSNS BPF_MAXINSNS
+#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 13
+#define MAX_NR_MAPS 18
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -59,10 +60,12 @@
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
+static int skips;
struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
+ struct bpf_insn *fill_insns;
int fixup_map_hash_8b[MAX_FIXUPS];
int fixup_map_hash_48b[MAX_FIXUPS];
int fixup_map_hash_16b[MAX_FIXUPS];
@@ -76,9 +79,15 @@ struct bpf_test {
int fixup_map_in_map[MAX_FIXUPS];
int fixup_cgroup_storage[MAX_FIXUPS];
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
+ int fixup_map_spin_lock[MAX_FIXUPS];
+ int fixup_map_array_ro[MAX_FIXUPS];
+ int fixup_map_array_wo[MAX_FIXUPS];
+ int fixup_map_array_small[MAX_FIXUPS];
+ int fixup_sk_storage_map[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed;
+ int prog_len;
enum {
UNDEF,
ACCEPT,
@@ -115,10 +124,11 @@ struct other_val {
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
- /* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+ /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
- unsigned int len = BPF_MAXINSNS;
- struct bpf_insn *insn = self->insns;
+ /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
+ unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
+ struct bpf_insn *insn = self->fill_insns;
int i = 0, j, k = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
@@ -152,12 +162,14 @@ loop:
for (; i < len - 1; i++)
insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
insn[len - 1] = BPF_EXIT_INSN();
+ self->prog_len = len;
}
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
- struct bpf_insn *insn = self->insns;
- unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn = self->fill_insns;
+ /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */
+ unsigned int len = (1 << 15) / 6;
int i = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
@@ -167,11 +179,12 @@ static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
while (i < len - 1)
insn[i++] = BPF_LD_ABS(BPF_B, 1);
insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
}
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
- struct bpf_insn *insn = self->insns;
+ struct bpf_insn *insn = self->fill_insns;
uint64_t res = 0;
int i = 0;
@@ -189,12 +202,83 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
res ^= (res >> 32);
self->retval = (uint32_t)res;
}
+/* test the sequence of 1k jumps */
+static void bpf_fill_scale1(struct bpf_test *self)
+{
+ struct bpf_insn *insn = self->fill_insns;
+ int i = 0, k = 0;
+
+ insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ /* test to check that the sequence of 1024 jumps is acceptable */
+ while (k++ < 1024) {
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32);
+ insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2);
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
+ insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
+ -8 * (k % 64 + 1));
+ }
+ /* every jump adds 1024 steps to insn_processed, so to stay exactly
+ * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
+ */
+ while (i < MAX_TEST_INSNS - 1025)
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+ insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
+ self->retval = 42;
+}
+
+/* test the sequence of 1k jumps in the innermost function (function depth 8) */
+static void bpf_fill_scale2(struct bpf_test *self)
+{
+ struct bpf_insn *insn = self->fill_insns;
+ int i = 0, k = 0;
+
+#define FUNC_NEST 7
+ for (k = 0; k < FUNC_NEST; k++) {
+ insn[i++] = BPF_CALL_REL(1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ /* test to check that the sequence of 1024 jumps is acceptable */
+ while (k++ < 1024) {
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32);
+ insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2);
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
+ insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
+ -8 * (k % (64 - 4 * FUNC_NEST) + 1));
+ }
+ /* every jump adds 1024 steps to insn_processed, so to stay exactly
+ * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
+ */
+ while (i < MAX_TEST_INSNS - 1025)
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+ insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
+ self->retval = 42;
+}
+
+static void bpf_fill_scale(struct bpf_test *self)
+{
+ switch (self->retval) {
+ case 1:
+ return bpf_fill_scale1(self);
+ case 2:
+ return bpf_fill_scale2(self);
+ default:
+ self->prog_len = 0;
+ break;
+ }
+}
+
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
-#define BPF_SK_LOOKUP \
+#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
BPF_MOV64_IMM(BPF_REG_2, 0), \
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
@@ -203,15402 +287,54 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
- /* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
+ /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
BPF_MOV64_IMM(BPF_REG_4, 0), \
BPF_MOV64_IMM(BPF_REG_5, 0), \
- BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
-
-static struct bpf_test tests[] = {
- {
- "add+sub+mul",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_2, 3),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "DIV32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD32 by 0, zero check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "MOD64 by 0, zero check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "DIV32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 2),
- BPF_MOV32_IMM(BPF_REG_2, 16),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 8,
- },
- {
- "DIV32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 by 0, zero check, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 by 0, zero check ok, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_MOV32_IMM(BPF_REG_1, 3),
- BPF_MOV32_IMM(BPF_REG_2, 5),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD32 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD32 by 0, zero check 2, cls",
- .insns = {
- BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 by 0, zero check 1, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "MOD64 by 0, zero check 2, cls",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_0, -1),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = -1,
- },
- /* Just make sure that JITs used udiv/umod as otherwise we get
- * an exception from INT_MIN/-1 overflow similarly as with div
- * by zero.
- */
- {
- "DIV32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "DIV64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, LLONG_MIN),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "MOD32 overflow, check 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD32 overflow, check 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = INT_MIN,
- },
- {
- "MOD64 overflow, check 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "MOD64 overflow, check 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "xor32 zero extend check",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_2, -1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
- BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
- BPF_MOV32_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "empty prog",
- .insns = {
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "only exit insn",
- .insns = {
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "unreachable",
- .insns = {
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "unreachable2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unreachable",
- .result = REJECT,
- },
- {
- "out of range jump",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "out of range jump2",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -2),
- BPF_EXIT_INSN(),
- },
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "test1 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test2 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid BPF_LD_IMM insn",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "test3 ld_imm64",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test4 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test5 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test6 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "test7 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "test8 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "uses reserved fields",
- .result = REJECT,
- },
- {
- "test9 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 1, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test10 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test11 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "test12 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, 0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "test13 ld_imm64",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
- BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
- },
- {
- "arsh32 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on imm 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
- BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -16069393,
- },
- {
- "arsh32 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "arsh32 on reg 2",
- .insns = {
- BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
- BPF_MOV64_IMM(BPF_REG_1, 15),
- BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 43724,
- },
- {
- "arsh64 on imm",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "arsh64 on reg",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "no bpf_exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
- },
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "loop (back-edge)",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "loop2 (back-edge)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "conditional loop",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "read uninitialized register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "read invalid register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "program doesn't init R0 before exit in all branches",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "stack out of bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack",
- .result = REJECT,
- },
- {
- "invalid call insn1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 8d",
- .result = REJECT,
- },
- {
- "invalid call insn2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_CALL uses reserved",
- .result = REJECT,
- },
- {
- "invalid function call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid func unknown#1234567",
- .result = REJECT,
- },
- {
- "uninitialized stack1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr = "invalid indirect read from stack",
- .result = REJECT,
- },
- {
- "uninitialized stack2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid read from stack",
- .result = REJECT,
- },
- {
- "invalid fp arithmetic",
- /* If this gets ever changed, make sure JITs can deal with it. */
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 subtraction from stack pointer",
- .result = REJECT,
- },
- {
- "non-invalid fp arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "invalid argument register",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "non-invalid argument register",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_cgroup_classid),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "check valid spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* fill it back into R2 */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- /* should be able to access R0 = *(R2 + 8) */
- /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .retval = POINTER_VALUE,
- },
- {
- "check valid spill/fill, skb mark",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- },
- {
- "check corrupted spill/fill",
- .insns = {
- /* spill R1(ctx) into stack */
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- /* mess up with R1 pointer on stack */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
- /* fill back into R0 is fine for priv.
- * R0 now becomes SCALAR_VALUE.
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- /* Load from R0 should fail. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "R0 invalid mem access 'inv",
- .result = REJECT,
- },
- {
- "check corrupted spill/fill, LSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "check corrupted spill/fill, MSB",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "invalid src register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R15 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in STX",
- .insns = {
- BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in ST",
- .insns = {
- BPF_ST_MEM(BPF_B, 14, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R14 is invalid",
- .result = REJECT,
- },
- {
- "invalid src register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R12 is invalid",
- .result = REJECT,
- },
- {
- "invalid dst register in LDX",
- .insns = {
- BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R11 is invalid",
- .result = REJECT,
- },
- {
- "junk insn",
- .insns = {
- BPF_RAW_INSN(0, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode 00",
- .result = REJECT,
- },
- {
- "junk insn2",
- .insns = {
- BPF_RAW_INSN(1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_LDX uses reserved fields",
- .result = REJECT,
- },
- {
- "junk insn3",
- .insns = {
- BPF_RAW_INSN(-1, 0, 0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn4",
- .insns = {
- BPF_RAW_INSN(-1, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode ff",
- .result = REJECT,
- },
- {
- "junk insn5",
- .insns = {
- BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ALU uses reserved fields",
- .result = REJECT,
- },
- {
- "misaligned read from stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "invalid map_fd for function call",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .errstr = "fd 0 is not pointing to valid bpf_map",
- .result = REJECT,
- },
- {
- "don't check return value before access",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access 'map_value_or_null'",
- .result = REJECT,
- },
- {
- "access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "misaligned value access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "sometimes access memory with incorrect alignment",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .errstr_unpriv = "R0 leaks addr",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "jump test 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 14),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 19),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JA, 0, 0, 15),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
- BPF_JMP_IMM(BPF_JA, 0, 0, 11),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
- BPF_JMP_IMM(BPF_JA, 0, 0, 7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
- BPF_JMP_IMM(BPF_JA, 0, 0, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_delete_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 24 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = -ENOENT,
- },
- {
- "jump test 4",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "jump test 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "access skb fields ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "access skb fields bad1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "access skb fields bad2",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad3",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -12),
- },
- .fixup_map_hash_8b = { 6 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "access skb fields bad4",
- .insns = {
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -13),
- },
- .fixup_map_hash_8b = { 7 },
- .errstr = "different pointers",
- .errstr_unpriv = "R1 pointer comparison",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "invalid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "valid access __sk_buff family",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff local_ip6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access __sk_buff remote_port",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "invalid access of tc_classid for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of skb->mark for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->mark is not writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_index is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "check skb->priority is writeable by SK_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, priority)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet read for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "direct packet write for SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "overlapping checks for direct packet access SK_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access family in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, family)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_ip4 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip4)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access local_port in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_port)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "valid access remote_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, remote_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access local_ip6 in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, local_ip6[3])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_SKB,
- },
- {
- "valid access size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid 64B read of size in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read past end of SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, size) + 4),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "invalid read offset in SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, family) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "direct packet write for SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "overlapping checks for direct packet access SK_MSG",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, data)),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
- offsetof(struct sk_msg_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SK_MSG,
- },
- {
- "check skb->mark is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check skb->tc_index is not writeable by sockets",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "check cb access: byte",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "__sk_buff->hash, offset 0, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "__sk_buff->tc_index, offset 3, byte store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash byte load permitted 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: byte, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: half",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3]) + 2),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: half, unaligned",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check __sk_buff->hash, offset 0, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, hash)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->tc_index, offset 2, half store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load permitted 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 2),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check skb->hash half load not permitted, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check skb->hash half load not permitted, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 3),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, hash) + 1),
-#endif
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check cb access: half, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check cb access: word",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: word, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 1),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: word, unaligned 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4]) + 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check cb access: double, unaligned 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[1])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, unaligned 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_EXIT_INSN(),
- },
- .errstr = "misaligned context access",
- .result = REJECT,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "check cb access: double, oob 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, oob 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw store not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check __sk_buff->ifindex dw load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, ifindex)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check cb access: double, wrong type",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "check out of range skb->cb access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0]) + 256),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- },
- {
- "write skb fields from socket prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- },
- {
- "write skb fields from tc_cls_act prog",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tc_index)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "PTR_TO_STACK store/load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfaceb00c,
- },
- {
- "PTR_TO_STACK store/load - bad alignment on off",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
- },
- {
- "PTR_TO_STACK store/load - bad alignment on reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
- },
- {
- "PTR_TO_STACK store/load - out of bounds low",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=-79992 size=8",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- },
- {
- "PTR_TO_STACK store/load - out of bounds high",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off=0 size=8",
- },
- {
- "unpriv: return pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .retval = POINTER_VALUE,
- },
- {
- "unpriv: add const to pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: add pointer to pointer",
- .insns = {
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 pointer += pointer",
- },
- {
- "unpriv: neg pointer",
- .insns = {
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer arithmetic",
- },
- {
- "unpriv: cmp pointer with const",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 pointer comparison",
- },
- {
- "unpriv: cmp pointer with pointer",
- .insns = {
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R10 pointer comparison",
- },
- {
- "unpriv: check that printk is disallowed",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_trace_printk),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "unknown func bpf_trace_printk#6",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to helper function",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R4 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: indirectly pass pointer on stack to helper function",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- },
- {
- "unpriv: mangle pointer on stack 1",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: mangle pointer on stack 2",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "attempt to corrupt spilled",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: read pointer from stack in small chunks",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid size",
- .result = REJECT,
- },
- {
- "unpriv: write pointer into ctx",
- .insns = {
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 leaks addr",
- .result_unpriv = REJECT,
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill of ctx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "unpriv: spill/fill of ctx 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=fp expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of ctx 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
- BPF_REG_0, -8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_hash_recalc),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=inv expected=ctx",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - ctx and sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- /* if (sk) bpf_sk_release(sk) */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "type=ctx expected=sock",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - leak sock",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb == NULL) *target = sock; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* else *target = skb; */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* struct __sk_buff *skb = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* skb->mark = 42; */
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "Unreleased reference",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- /* struct bpf_sock *sock = bpf_sock_lookup(...); */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- /* u64 foo; */
- /* void *target = &foo; */
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- /* if (skb) *target = skb */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- /* else *target = sock */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- /* struct bpf_sock *sk = *target; */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
- offsetof(struct bpf_sock, mark)),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- //.errstr = "same insn cannot be used with different pointers",
- .errstr = "cannot write into socket",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "unpriv: spill/fill of different pointers ldx",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- -(__s32)offsetof(struct bpf_perf_event_data,
- sample_period) - 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
- offsetof(struct bpf_perf_event_data,
- sample_period)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "same insn cannot be used with different pointers",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "unpriv: write pointer into map elem value",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "alu32: mov u32 const",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_7, 0),
- BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
- BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "unpriv: partial copy of pointer",
- .insns = {
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 partial copy",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: pass pointer to tail_call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .errstr_unpriv = "R3 leaks addr into helper",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp map pointer with zero",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R1 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: write into frame pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: spill/fill frame pointer",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "frame pointer is read only",
- .result = REJECT,
- },
- {
- "unpriv: cmp of frame pointer",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: adding of fp",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "unpriv: cmp of stack pointer",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R2 pointer comparison",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "runtime/jit: tail_call within bounds, prog once",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "runtime/jit: tail_call within bounds, prog loop",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 41,
- },
- {
- "runtime/jit: tail_call within bounds, no prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "runtime/jit: tail_call out of bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 256),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass negative index to tail_call",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -1),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 1 },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "runtime/jit: pass > 32bit index to tail_call",
- .insns = {
- BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 2 },
- .result = ACCEPT,
- .retval = 42,
- /* Verifier rewrite for unpriv skips tail call here. */
- .retval_unpriv = 2,
- },
- {
- "PTR_TO_STACK check high 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check high 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=0 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check high 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check high 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK check low 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK check low 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "invalid stack off=-513 size=1",
- .result = REJECT,
- },
- {
- "PTR_TO_STACK check low 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between fp pointer",
- },
- {
- "PTR_TO_STACK check low 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack off",
- },
- {
- "PTR_TO_STACK check low 7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
- BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .errstr = "fp pointer offset",
- },
- {
- "PTR_TO_STACK mixed reg/k, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "PTR_TO_STACK mixed reg/k, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = -3,
- },
- {
- "PTR_TO_STACK reg",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_2, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid stack off=0 size=1",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "stack pointer arithmetic",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_ST_MEM(0, BPF_REG_2, 4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "raw_stack: no skb_load_bytes",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- /* Call to skb_load_bytes() omitted. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid read from stack off -8+0 size 8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, negative len 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, ~0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, zero len",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, no init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, init",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs around bounds",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs corruption 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "raw_stack: skb_load_bytes, spilled regs + data",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
- offsetof(struct __sk_buff, priority)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-513 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-1 access_size=8",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, invalid access 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid stack type R3 off=-512 access_size=0",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "raw_stack: skb_load_bytes, large access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_4, 512),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via ST",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_ST stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "context stores via XADD",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
- BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access off=76",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- },
- {
- "direct packet access: test4 (write)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test5 (pkt_end >= reg, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test6 (pkt_end >= reg, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test7 (pkt_end >= reg, both accesses)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test8 (double test, variant 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test9 (double test, variant 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test10 (write invalid)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid access to packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test11 (shift, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test12 (and, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_3, 144),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test13 (branches, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
- BPF_MOV64_IMM(BPF_REG_3, 14),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_3, 24),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_IMM(BPF_REG_5, 12),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test15 (spill with xadd)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
- BPF_MOV64_IMM(BPF_REG_5, 4096),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test16 (arith on data_end)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test17 (pruning, alignment)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
- BPF_JMP_A(-6),
- },
- .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- },
- {
- "direct packet access: test18 (imm += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 8),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test19 (imm += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test20 (x += pkt_ptr, 1)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test21 (x += pkt_ptr, 2)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test22 (x += pkt_ptr, 3)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
- BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test23 (x += pkt_ptr, 4)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test24 (x += pkt_ptr, 5)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 64),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet access: test25 (marking on <, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test26 (marking on <, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "direct packet access: test27 (marking on <=, good access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 1,
- },
- {
- "direct packet access: test28 (marking on <=, bad access)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -4),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test1, valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test2, unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test3, variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test4, packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test5, packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "helper access to packet: test6, cls valid packet_ptr range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_update_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test7, cls unchecked packet_ptr",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test8, cls variable add",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test9, cls packet_ptr with bad range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 7 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test10, cls packet_ptr with too short range",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 6 },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test11, cls unsuitable helper 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 42),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_store_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test12, cls unsuitable helper 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_4, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "helper access to the packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test13, cls helper ok",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test14, cls helper ok sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test15, cls helper fail sub",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test16, cls helper fail range 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test17, cls helper fail range 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, -9),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test18, cls helper fail range 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, ~0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R2 min value is negative",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test19, cls helper range zero",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test20, pkt end as input",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R1 type=pkt_end expected=fp",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to packet: test21, wrong reg",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_diff),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "prevent map lookup in sockmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in sockhash",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_sockhash = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_SOCK_OPS,
- },
- {
- "prevent map lookup in xskmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_xskmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "prevent map lookup in stack trace",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_stacktrace = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "prevent map lookup in prog array",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
- },
- {
- "valid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "valid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "valid map access into an array with a signed variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a constant",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=48 size=8",
- .result = REJECT,
- },
- {
- "invalid map access into an array with a register",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a variable",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with no floor check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid map access into an array with a invalid max check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3, 11 },
- .errstr = "R0 pointer += pointer",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "direct packet read test#1 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, pkt_type)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, queue_mapping)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, protocol)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_present)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=76 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#2 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_tci)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, vlan_proto)),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, priority)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, priority)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff,
- ingress_ifindex)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, tc_index)),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, hash)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#3 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, cb[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, cb[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, cb[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, cb[4])),
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
- offsetof(struct __sk_buff, cb[1])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, cb[2])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
- offsetof(struct __sk_buff, cb[3])),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
- offsetof(struct __sk_buff, cb[4])),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "direct packet read test#4 for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, family)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip4)),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
- offsetof(struct __sk_buff, remote_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[0])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[1])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[2])),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, local_ip6[3])),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct __sk_buff, remote_port)),
- BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
- offsetof(struct __sk_buff, local_port)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of tc_classid for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of data_meta for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_meta)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid access of flow_keys for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, flow_keys)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid write access to napi_id for CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, napi_id)),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
- offsetof(struct __sk_buff, napi_id)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "valid per-cpu cgroup storage access",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .result = REJECT,
- .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "fd 1 is not pointing to valid bpf_map",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=256 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid per-cpu cgroup storage access 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 7),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "invalid per-cpu cgroup storage access 6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_local_storage),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_percpu_cgroup_storage = { 1 },
- .result = REJECT,
- .errstr = "get_local_storage() doesn't support non-zero flags",
- .errstr_unpriv = "R2 leaks addr into helper function",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "write tstamp from CGROUP_SKB",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "invalid bpf_context access off=152 size=8",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "read tstamp from CGROUP_SKB",
- .insns = {
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tstamp)),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- },
- {
- "multiple registers share map_lookup_elem result",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "alu ops on ptr_to_map_value_or_null, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "R4 pointer arithmetic on map_value_or_null",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid memory access with multiple map_lookup_elem calls",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = REJECT,
- .errstr = "R4 !read_ok",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "valid indirect map_lookup_elem access with 2nd lookup in branch",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_2, 10),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS
- },
- {
- "invalid map access from else condition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT,
- .errstr_unpriv = "R0 leaks addr",
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "constant register |= constant should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should keep constant type",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 13),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "constant register |= constant register should not bypass stack boundary checks",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_4, 24),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-48 access_size=58",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid direct packet write for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "invalid direct packet write for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "cannot write into packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet write for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "direct packet read for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct packet read for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_OUT,
- },
- {
- "direct packet read for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "overlapping checks for direct packet access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "make headroom for LWT_XMIT",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 34),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- /* split for s390 to succeed */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_LWT_XMIT,
- },
- {
- "invalid access of tc_classid for LWT_IN",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_OUT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "invalid access of tc_classid for LWT_XMIT",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "leak pointer into ctx 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 2 },
- .errstr_unpriv = "R2 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
- offsetof(struct __sk_buff, cb[0])),
- BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "R10 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr = "BPF_XADD stores into R1 ctx is not allowed",
- },
- {
- "leak pointer into ctx 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
- offsetof(struct __sk_buff, cb[0])),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1 },
- .errstr_unpriv = "R2 leaks addr into ctx",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "leak pointer into map val",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr_unpriv = "R6 leaks addr into mem",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "helper access to map: full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=56",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: negative range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=0",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const imm): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): out-of-bound range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=52",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (> adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via const reg): negative range (< adjustment)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct test_val, foo)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): full range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): partial range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): empty range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_trace_printk),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 unbounded memory access",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2,
- sizeof(struct test_val) -
- offsetof(struct test_val, foo) + 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=4 size=45",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using <=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 unbounded memory access",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, good access 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to map: bounds check using s<=, bad access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 min value is negative",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map access: known scalar += value_ptr from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R1 tried to add from different maps",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar from different maps",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 min value is outside of the array range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr from different maps, but same value properties",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 5 },
- .fixup_map_array_48b = { 8 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: mixing value pointer and scalar, 1",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different pointers or scalars",
- .retval = 0,
- },
- {
- "map access: mixing value pointer and scalar, 2",
- .insns = {
- // load map value pointer into r0 and r2
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- // load some number from the map into r1
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- // branch A
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- // branch B
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- // common instruction
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- // depending on r1, branch:
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
- // branch A
- BPF_JMP_A(4),
- // branch B
- BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
- // verifier follows fall-through
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- // fake-dead code; targeted from branch A to
- // prevent dead code sanitization
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R2 tried to add from different maps or paths",
- .retval = 0,
- },
- {
- "sanitation: alu with different scalars",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
- BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0x100000),
- BPF_JMP_A(2),
- BPF_MOV64_IMM(BPF_REG_2, 42),
- BPF_MOV64_IMM(BPF_REG_3, 0x100001),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 1 },
- .result = ACCEPT,
- .retval = 0x100000,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, upper oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 48),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= known scalar, lower oob arith, test 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 47),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: known scalar += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 49),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "invalid access to map value",
- },
- {
- "map access: value_ptr += known scalar, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr += known scalar, 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += known scalar, 6",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: unknown scalar += value_ptr, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 0xabcdef12,
- },
- {
- "map access: unknown scalar += value_ptr, 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_1, 19),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 max value is outside of the array range",
- .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
- },
- {
- "map access: value_ptr += unknown scalar, 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 0xabcdef12,
- },
- {
- "map access: value_ptr += unknown scalar, 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "map access: value_ptr += value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 pointer += pointer prohibited",
- },
- {
- "map access: known scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= known scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_MOV64_IMM(BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "map access: value_ptr -= known scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_1, 6),
- BPF_MOV64_IMM(BPF_REG_2, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: unknown scalar -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R1 tried to subtract pointer from scalar",
- },
- {
- "map access: value_ptr -= unknown scalar",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 min value is negative",
- },
- {
- "map access: value_ptr -= unknown scalar, 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .retval = 1,
- },
- {
- "map access: value_ptr -= value_ptr",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_map_array_48b = { 3 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- .errstr_unpriv = "R0 pointer -= pointer prohibited",
- },
- {
- "map lookup helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 8 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map update helper access to map: wrong size",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .fixup_map_hash_16b = { 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=0 size=16",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- offsetof(struct other_val, bar)),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
- sizeof(struct other_val) - 4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const imm): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 9 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- offsetof(struct other_val, bar)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3,
- sizeof(struct other_val) - 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=12 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via const reg): out-of-bound 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, -4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar), 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 10 },
- .result = REJECT,
- .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map helper access to adjusted map (via variable): wrong max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct other_val, bar) + 1, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_16b = { 3, 11 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=16 off=9 size=8",
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value or null is marked on register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- },
- {
- "map element value store of cleared call register",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R1 !read_ok",
- .errstr = "R1 !read_ok",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "map element value with unaligned store",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
- BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value with unaligned load",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 bitwise operator &= on pointer",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 32-bit pointer arithmetic prohibited",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 pointer arithmetic with /= operator",
- .result = REJECT,
- },
- {
- "map element value illegal alu op, 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 pointer arithmetic prohibited",
- .errstr = "invalid mem access 'inv'",
- .result = REJECT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value illegal alu op, 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_IMM(BPF_REG_3, 4096),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 invalid mem access 'inv'",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map element value is preserved across register spilling",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
- offsetof(struct test_val, foo)),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND, zero included",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, bounds + offset",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid stack type R1 off=-64 access_size=65",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no max check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- /* because max wasn't checked, signed min is negative */
- .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP, no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+0 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: stack, JMP (signed), no min check",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 min value is negative",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val), 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) + 1, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "invalid access to map value, value_size=48 off=0 size=49",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, correct bounds",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 20, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: map adjusted, JMP, wrong max",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
- sizeof(struct test_val) - 19, 4),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R1 min value is outside of the array range",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_csum_diff),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 0 /* csum_diff of 64-byte packet */,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 type=inv expected=fp",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes leak",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid indirect read from stack off -64+32 size 64",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "helper access to variable memory: 8 bytes no leak (init memory)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "invalid and of negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid range check",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
- BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
- BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "map in map access",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .result = ACCEPT,
- },
- {
- "invalid inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 pointer arithmetic on map_ptr prohibited",
- .result = REJECT,
- },
- {
- "forgot null checking on the inner map pointer",
- .insns = {
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_ST_MEM(0, BPF_REG_10, -4, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_in_map = { 3 },
- .errstr = "R1 type=map_value_or_null expected=map_ptr",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_abs: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 0),
- BPF_LD_ABS(BPF_W, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "ld_abs: tests on r6 and skb data reload helper",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42 /* ultimate return value */,
- },
- {
- "ld_ind: check calling conv, r1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_3, 1),
- BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .errstr = "R5 !read_ok",
- .result = REJECT,
- },
- {
- "ld_ind: check calling conv, r7",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check bpf_perf_event_data->sample_period byte load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 7),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period half load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 6),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period word load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
-#else
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period) + 4),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check bpf_perf_event_data->sample_period dword load permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
- offsetof(struct bpf_perf_event_data, sample_period)),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_PERF_EVENT,
- },
- {
- "check skb->data half load not permitted",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- },
- {
- "check skb->tc_classid half load not permitted for lwt prog",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid)),
-#else
- BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, tc_classid) + 2),
-#endif
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid bpf_context access",
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "bounds checks mixing signed and unsigned, positive bounds",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
- BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 5",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_6, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_load_bytes),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R4 min value is negative, either use unsigned",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 7",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 8",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 9",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 10",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 11",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- /* Dead branch. */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 12",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 13",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, 2),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 14",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -1),
- BPF_MOV64_IMM(BPF_REG_8, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
- BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -7),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "bounds checks mixing signed and unsigned, variant 15",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
- BPF_MOV64_IMM(BPF_REG_2, -6),
- BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "unbounded min value",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- .result_unpriv = REJECT,
- },
- {
- "subtraction bounds (map value) variant 1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT,
- },
- {
- "subtraction bounds (map value) variant 2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
- BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
- .result = REJECT,
- },
- {
- "check subtraction on pointers for unpriv",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
- BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
- BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 1, 9 },
- .result = ACCEPT,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R9 pointer -= pointer prohibited",
- },
- {
- "bounds check based on zero-extended MOV",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0x0000'0000'ffff'ffff */
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check based on sign-extended MOV. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 4294967295",
- .result = REJECT
- },
- {
- "bounds check based on sign-extended MOV. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- /* r2 = 0xffff'ffff'ffff'ffff */
- BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
- /* r2 = 0xfff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
- /* r0 = <oob pointer> */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- /* access to OOB pointer */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 min value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value_size=8 off=1073741825",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check based on reg_off + var_off + insn_off. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 4 },
- .errstr = "value 1073741823",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "bounds check after truncation of non-boundary-crossing range",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- /* r2 = 0x10'0000'0000 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
- /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = [0x00, 0xff] */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after truncation of boundary-crossing range (1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after truncation of boundary-crossing range (2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0x1'0000'007f] */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0xffff'ff80, 0xffff'ffff] or
- * [0x0000'0000, 0x0000'007f]
- * difference to previous test: truncation via MOV32
- * instead of ALU32.
- */
- BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = [0x00, 0xff] or
- * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* no-op or OOB pointer computation */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- /* not actually fully unbounded, but the bound is very high */
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after wrapping 32-bit addition",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- /* r1 = 0x7fff'ffff */
- BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
- /* r1 = 0xffff'fffe */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- /* r1 = 0 */
- BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
- /* no-op */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* access at offset 0 */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT
- },
- {
- "bounds check after shift with oversized count operand",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_2, 32),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- /* r1 = (u32)1 << (u32)32 = ? */
- BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
- /* r1 = [0x0000, 0xffff] */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 max value is outside of the array range",
- .result = REJECT
- },
- {
- "bounds check after right shift of maybe-negative number",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = [0x00, 0xff] */
- BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- /* r1 = [-0x01, 0xfe] */
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
- /* r1 = 0 or 0xff'ffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* r1 = 0 or 0xffff'ffff'ffff */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
- /* computes unknown pointer, potentially OOB */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* potentially OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 unbounded memory access",
- .result = REJECT
- },
- {
- "bounds check after 32-bit right shift with 64-bit input",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
- /* r1 = 2 */
- BPF_MOV64_IMM(BPF_REG_1, 2),
- /* r1 = 1<<32 */
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
- /* r1 = 0 (NOT 2!) */
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
- /* r1 = 0xffff'fffe (NOT 0!) */
- BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
- /* computes OOB pointer */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- /* OOB access */
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- /* exit */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R0 invalid mem access",
- .result = REJECT,
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test1",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 2147483646",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test2",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset 1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test3",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "pointer offset -1073741822",
- .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
- .result = REJECT
- },
- {
- "bounds check map access with off+size signed 32bit overflow. test4",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1000000),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "map_value pointer and 1000000000000",
- .result = REJECT
- },
- {
- "pointer/scalar confusion in state equality check (way 1)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_JMP_A(1),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "pointer/scalar confusion in state equality check (way 2)",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_JMP_A(1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- .result_unpriv = REJECT,
- .errstr_unpriv = "R0 leaks addr as return value"
- },
- {
- "variable-offset ctx access",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- /* add it to skb. We now have either &skb->len or
- * &skb->pkt_type, but we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "indirect variable-offset stack access",
- .insns = {
- /* Fill the top 8 bytes of the stack */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* Make it small and 4-byte aligned */
- BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
- /* add it to fp. We now have either fp-4 or fp-8, but
- * we don't know which
- */
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
- /* dereference it indirectly */
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 5 },
- .errstr = "variable stack read R2",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "direct stack access with 32-bit wraparound. test1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 2147483647",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer and 1073741823",
- .result = REJECT
- },
- {
- "direct stack access with 32-bit wraparound. test3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
- BPF_EXIT_INSN()
- },
- .errstr = "fp pointer offset 1073741822",
- .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
- .result = REJECT
- },
- {
- "liveness pruning and write screening",
- .insns = {
- /* Get an unknown value */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- /* branch conditions teach us nothing about R2 */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 !read_ok",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_LWT_IN,
- },
- {
- "varlen_map_value_access pruning",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
- BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 3 },
- .errstr_unpriv = "R0 leaks addr",
- .errstr = "R0 unbounded memory access",
- .result_unpriv = REJECT,
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "invalid 64-bit BPF_END",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 0),
- {
- .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
- .dst_reg = BPF_REG_0,
- .src_reg = 0,
- .off = 0,
- .imm = 32,
- },
- BPF_EXIT_INSN(),
- },
- .errstr = "unknown opcode d7",
- .result = REJECT,
- },
- {
- "XDP, using ifindex from netdev",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, ingress_ifindex)),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .retval = 1,
- },
- {
- "meta access, test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet, off=-8",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
- BPF_MOV64_IMM(BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_meta),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R3 !read_ok",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
- BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test10",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "invalid access to packet",
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test11",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_IMM(BPF_REG_5, 42),
- BPF_MOV64_IMM(BPF_REG_6, 24),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "meta access, test12",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
- BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "arithmetic ops make PTR_TO_CTX unusable",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
- offsetof(struct __sk_buff, data) -
- offsetof(struct __sk_buff, mark)),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .errstr = "dereference of modified ctx ptr",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "pkt_end - pkt_start is allowed",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_end mangling, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R3 pointer arithmetic on pkt_end",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' > pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end > pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' < pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end < pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end >= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_end <= pkt_data', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data > pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data < pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', good access",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data_meta)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R1 offset is outside of the packet",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "check deducing bounds from const, 3",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- },
- {
- "check deducing bounds from const, 5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 6",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, ~0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "check deducing bounds from const, 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R0 tried to subtract pointer from scalar",
- },
- {
- "check deducing bounds from const, 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
- /* Marks reg as unknown. */
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
- },
- {
- "bpf_exit with invalid return code. test1",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0xffffffff)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x0; 0x3)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test4",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test5",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has value (0x2; 0x0)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test6",
- .insns = {
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 is not a known value (ctx)",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "bpf_exit with invalid return code. test7",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr = "R0 has unknown scalar value",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
- },
- {
- "calls: basic sanity",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: not on unpriviledged",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: div by 0 in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_2, 0),
- BPF_MOV32_IMM(BPF_REG_3, 1),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: multiple ret types in subprog 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV32_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: multiple ret types in subprog 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
- offsetof(struct __sk_buff, data)),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 16 },
- .result = REJECT,
- .errstr = "R0 min value is outside of the array range",
- },
- {
- "calls: overlapping caller/callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn is not an exit or jmp",
- .result = REJECT,
- },
- {
- "calls: wrong recursive calls",
- .insns = {
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: wrong src reg",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: wrong off value",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "BPF_CALL uses reserved fields",
- .result = REJECT,
- },
- {
- "calls: jump back loop",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn 0 to 0",
- .result = REJECT,
- },
- {
- "calls: conditional call",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: conditional call 2",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 4",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -5),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: conditional call 5",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -6),
- BPF_MOV64_IMM(BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: conditional call 6",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
- },
- {
- "calls: using r0 returned by callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .result = ACCEPT,
- },
- {
- "calls: using uninit r0 from callee",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "calls: callee is using r1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_ACT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN,
- },
- {
- "calls: callee using args1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = POINTER_VALUE,
- },
- {
- "calls: callee using wrong args2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "R2 !read_ok",
- .result = REJECT,
- },
- {
- "calls: callee using two args",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
- offsetof(struct __sk_buff, len)),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
- },
- {
- "calls: callee changing pkt pointers",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
- BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* clear_all_pkt_pointers() has to walk all frames
- * to make sure that pkt pointers in the caller
- * are cleared when callee is calling a helper that
- * adjusts packet size
- */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_xdp_adjust_head),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "R6 invalid mem access 'inv'",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls with args",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = TEST_DATA_LEN + TEST_DATA_LEN,
- },
- {
- "calls: calls with stack arith",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: calls with misaligned stack access",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
- .errstr = "misaligned stack access",
- .result = REJECT,
- },
- {
- "calls: calls control flow, jump test",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 43,
- },
- {
- "calls: calls control flow, jump test 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 43),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "jump out of range from insn 1 to 4",
- .result = REJECT,
- },
- {
- "calls: two calls with bad jump",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range from insn 11 to 9",
- .result = REJECT,
- },
- {
- "calls: recursive call. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: recursive call. test2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge",
- .result = REJECT,
- },
- {
- "calls: unreachable code",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "unreachable insn 6",
- .result = REJECT,
- },
- {
- "calls: invalid call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: invalid call 2",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "invalid destination",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test1",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: jumping across function bodies. test2",
- .insns = {
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "jump out of range",
- .result = REJECT,
- },
- {
- "calls: call without exit",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: call into middle of ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: call into middle of other call",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "last insn",
- .result = REJECT,
- },
- {
- "calls: ld_abs with changing ctx data in callee",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
- },
- {
- "calls: two calls with bad fallthrough",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "not an exit",
- .result = REJECT,
- },
- {
- "calls: two calls with stack read",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: two calls with stack write",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
- /* write into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* read from stack frame of main prog */
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: stack overflow using two frames (pre-call access)",
- .insns = {
- /* prog 1 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack overflow using two frames (post-call access)",
- .insns = {
- /* prog 1 */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
-
- /* prog 2 */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "combined stack size",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test1",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=256, stack_B=64
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test2",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=32, stack_A=64, stack_B=256
- * and max(main+A, main+A+B) < 512
- */
- .result = ACCEPT,
- },
- {
- "calls: stack depth check using three frames. test3",
- .insns = {
- /* main */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
- BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -3),
- /* B */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
- BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- /* stack_main=64, stack_A=224, stack_B=256
- * and max(main+A, main+A+B) > 512
- */
- .errstr = "combined stack",
- .result = REJECT,
- },
- {
- "calls: stack depth check using three frames. test4",
- /* void main(void) {
- * func1(0);
- * func1(1);
- * func2(1);
- * }
- * void func1(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * } else {
- * func2(alloc_or_recurse);
- * }
- * }
- * void func2(int alloc_or_recurse) {
- * if (alloc_or_recurse) {
- * frame_pointer[-300] = 1;
- * }
- * }
- */
- .insns = {
- /* main */
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* A */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = REJECT,
- .errstr = "combined stack",
- },
- {
- "calls: stack depth check using three frames. test5",
- .insns = {
- /* main */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
- BPF_EXIT_INSN(),
- /* A */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
- BPF_EXIT_INSN(),
- /* B */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
- BPF_EXIT_INSN(),
- /* C */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
- BPF_EXIT_INSN(),
- /* D */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
- BPF_EXIT_INSN(),
- /* E */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
- BPF_EXIT_INSN(),
- /* F */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
- BPF_EXIT_INSN(),
- /* G */
- BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
- BPF_EXIT_INSN(),
- /* H */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "call stack",
- .result = REJECT,
- },
- {
- "calls: spill into caller stack frame",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
- BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot spill",
- .result = REJECT,
- },
- {
- "calls: write into caller stack frame",
- .insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- BPF_EXIT_INSN(),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "calls: write into callee stack frame",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .errstr = "cannot return stack pointer",
- .result = REJECT,
- },
- {
- "calls: two calls with stack write and void return",
- .insns = {
- /* main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* write into stack frame of main prog */
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(), /* void return */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .result = ACCEPT,
- },
- {
- "calls: ambiguous return value",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .errstr_unpriv = "allowed for root only",
- .result_unpriv = REJECT,
- .errstr = "R0 !read_ok",
- .result = REJECT,
- },
- {
- "calls: two calls that return map_value",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with bool condition",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that return map_value with incorrect bool check",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* call 3rd function twice */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* first time with fp-8 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- /* second time with fp-16 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- /* fetch second map_value_ptr from the stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- /* lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(), /* return 0 */
- /* write map_value_ptr into stack frame of main prog */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(), /* return 1 */
- },
- .prog_type = BPF_PROG_TYPE_XDP,
- .fixup_map_hash_8b = { 23 },
- .result = REJECT,
- .errstr = "invalid read from stack off -16+0 size 8",
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0), // 26
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- /* write map_value_ptr into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
- BPF_JMP_IMM(BPF_JA, 0, 0, -30),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, -8),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "invalid access to map value, value_size=8 off=2 size=8",
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test1",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 1 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = ACCEPT,
- },
- {
- "calls: two calls that receive map_value_ptr_or_null via arg. test2",
- .insns = {
- /* main prog */
- /* pass fp-16, fp-8 into a function */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
- /* 1st lookup from map */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
-
- /* 2nd lookup from map */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
- BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV64_IMM(BPF_REG_9, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_9, 1),
-
- /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- /* if arg2 == 1 do *arg1 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-
- /* if arg4 == 0 do *arg3 = 0 */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
- /* fetch map_value_ptr from the stack of this function */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
- /* write into map value */
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_8b = { 12, 22 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'inv'",
- },
- {
- "calls: pkt_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- /* Marking is still kept, but not in all cases safe. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 3",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Marking is still kept and safe here. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* now the pkt range is verified, read pkt_ptr from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 4",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* Check marking propagated. */
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 5",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "same insn cannot be used with different",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 6",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R4 invalid mem access",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 8",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
- /* spill checked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: pkt_ptr spill into caller stack 9",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- /* spill unchecked pkt_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_5, 1),
- /* don't read back pkt_ptr from stack here */
- /* write 4 bytes into packet */
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid access to packet",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "calls: caller stack init to zero or map_value_or_null",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- /* fetch map_value_or_null or const_zero from stack */
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- /* store into map_value */
- BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- /* if (ctx == 0) return; */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
- /* else bpf_map_lookup() and *(fp - 8) = r0 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 13 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: stack init to zero and pruning",
- .insns = {
- /* first make allocated_stack 16 bytes */
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
- /* now fork the execution such that the false branch
- * of JGT insn will be verified second and it skips zero
- * init of fp-8 stack slot. If stack liveness marking
- * is missing live_read marks from call map_lookup
- * processing then pruning will incorrectly assume
- * that fp-8 stack slot was unused in the fall-through
- * branch and will accept the program incorrectly
- */
- BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 6 },
- .errstr = "invalid indirect read from stack off -8+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_XDP,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, array)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_hash_48b = { 13 },
- .fixup_map_array_48b = { 16 },
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "calls: two calls returning different map pointers for lookup (hash, map in map)",
- .insns = {
- /* main prog */
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
- BPF_CALL_REL(11),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_CALL_REL(12),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
- offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- /* subprog 1 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- /* subprog 2 */
- BPF_LD_MAP_FD(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .fixup_map_in_map = { 16 },
- .fixup_map_array_48b = { 13 },
- .result = REJECT,
- .errstr = "R0 invalid mem access 'map_ptr'",
- },
- {
- "cond: two branches returning different map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 5 },
- .fixup_prog2 = { 2 },
- .result_unpriv = REJECT,
- .errstr_unpriv = "tail_call abusing map_ptr",
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "cond: two branches returning same map pointers for lookup (tail, tail)",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
- offsetof(struct __sk_buff, mark)),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_JMP_IMM(BPF_JA, 0, 0, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_IMM(BPF_REG_3, 7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .fixup_prog2 = { 2, 5 },
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "search pruning: all branches should be verified (nop operation)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_A(1),
- BPF_MOV64_IMM(BPF_REG_4, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "R6 invalid mem access 'inv'",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "search pruning: all branches should be verified (invalid stack access)",
- .insns = {
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
- BPF_JMP_A(1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
- BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
- BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .errstr = "invalid read from stack off -16+0 size 8",
- .result = REJECT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "jit: lsh, rsh, arsh by 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_IMM(BPF_REG_1, 0xff),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
- BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
- BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: mov32 for ldimm64, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
- BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "jit: various mul tests",
- .insns = {
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
- BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
- BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
- BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
- BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
- BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 2,
- },
- {
- "xadd/w check unaligned stack",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "misaligned stack access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned map",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_8b = { 3 },
- .result = REJECT,
- .errstr = "misaligned value access off",
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- },
- {
- "xadd/w check unaligned pkt",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct xdp_md, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct xdp_md, data_end)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
- BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
- BPF_MOV64_IMM(BPF_REG_0, 99),
- BPF_JMP_IMM(BPF_JA, 0, 0, 6),
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
- BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
- BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
- BPF_EXIT_INSN(),
- },
- .result = REJECT,
- .errstr = "BPF_XADD stores into R2 pkt is not allowed",
- .prog_type = BPF_PROG_TYPE_XDP,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "xadd/w check whether src/dst got mangled, 1",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "xadd/w check whether src/dst got mangled, 2",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
- BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
- BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
- BPF_EXIT_INSN(),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = 3,
- },
- {
- "bpf_get_stack return R0 within range",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
- BPF_MOV64_IMM(BPF_REG_4, 256),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
- BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_get_stack),
- BPF_EXIT_INSN(),
- },
- .fixup_map_hash_48b = { 4 },
- .result = ACCEPT,
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- },
- {
- "ld_abs: invalid op 1",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_DW, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: invalid op 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "unknown opcode",
- },
- {
- "ld_abs: nmap reduced",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
- BPF_MOV32_IMM(BPF_REG_0, 18),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
- BPF_LD_IND(BPF_W, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
- BPF_MOV32_IMM(BPF_REG_0, 280971478),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
- BPF_LD_ABS(BPF_H, 12),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
- BPF_MOV32_IMM(BPF_REG_0, 22),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
- BPF_LD_IND(BPF_H, BPF_REG_7, 14),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
- BPF_MOV32_IMM(BPF_REG_0, 17366),
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
- BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
- BPF_MOV32_IMM(BPF_REG_0, 256),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 256,
- },
- {
- "ld_abs: div + abs, test 1",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_abs: div + abs, test 2",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
- BPF_LD_ABS(BPF_B, 128),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
- BPF_LD_IND(BPF_B, BPF_REG_8, -70),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 3",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 3),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: div + abs, test 4",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
- BPF_LD_ABS(BPF_B, 256),
- BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .data = {
- 10, 20, 30, 40, 50,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "ld_abs: vlan + abs, test 1",
- .insns = { },
- .data = {
- 0x34,
- },
- .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 0xbef,
- },
- {
- "ld_abs: vlan + abs, test 2",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
- BPF_MOV64_IMM(BPF_REG_6, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_MOV64_IMM(BPF_REG_2, 1),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_skb_vlan_push),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_IMM(BPF_REG_0, 42),
- BPF_EXIT_INSN(),
- },
- .data = {
- 0x34,
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 42,
- },
- {
- "ld_abs: jump around ld_abs",
- .insns = { },
- .data = {
- 10, 11,
- },
- .fill_helper = bpf_fill_jump_around_ld_abs,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 10,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 1",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 4090,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 2",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2047,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 3",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 511,
- },
- {
- "ld_dw: xor semi-random 64 bit imms, test 4",
- .insns = { },
- .data = { },
- .fill_helper = bpf_fill_rand_ld_dw,
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 5,
- },
- {
- "pass unmodified ctx pointer to helper",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: leak potential reference on stack 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: zero potential reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: copy and zero potential references",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking: release reference without check",
- .insns = {
- BPF_SK_LOOKUP,
- /* reference in r0 may be NULL */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=sock_or_null expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference 2",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference twice",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: release reference twice inside branch",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking: alloc, check, free in one subbranch",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
- /* Leak reference in R0 */
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking: alloc, check, free in both subbranches",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
- /* if (offsetof(skb, mark) > data_len) exit; */
- BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
- offsetof(struct __sk_buff, mark)),
- BPF_SK_LOOKUP,
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
- },
- {
- "reference tracking in call: free reference in subprog",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "pass modified ctx pointer to helper, 1",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 2",
- .insns = {
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_socket_cookie),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .result_unpriv = REJECT,
- .result = REJECT,
- .errstr_unpriv = "dereference of modified ctx ptr",
- .errstr = "dereference of modified ctx ptr",
- },
- {
- "pass modified ctx pointer to helper, 3",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_csum_update),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = REJECT,
- .errstr = "variable ctx access var_off=(0x0; 0x4)",
- },
- {
- "mov64 src == dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "mov64 src != dst",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
- // Check bounds are OK
- BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "allocated_stack",
- .insns = {
- BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
- BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
- BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
- BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .result_unpriv = ACCEPT,
- .insn_processed = 15,
- },
- {
- "masking, test out of bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 5),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 9",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 10",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 11",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test out of bounds 12",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_1, -1),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 1",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 4),
- BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 4,
- },
- {
- "masking, test in bounds 2",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 3",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
- BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xfffffffe,
- },
- {
- "masking, test in bounds 4",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
- BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0xabcde,
- },
- {
- "masking, test in bounds 5",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 0),
- BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "masking, test in bounds 6",
- .insns = {
- BPF_MOV32_IMM(BPF_REG_1, 46),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 7",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -46),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 46,
- },
- {
- "masking, test in bounds 8",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_3, -47),
- BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
- BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
- BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
- BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
- BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
- BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
- BPF_EXIT_INSN(),
- },
- .result = ACCEPT,
- .retval = 0,
- },
- {
- "reference tracking in call: free reference in subprog and outside",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "type=inv expected=sock",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc & leak reference in subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
- BPF_SK_LOOKUP,
- /* spill unchecked sk_ptr into stack of caller */
- BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: alloc in subprog, release outside",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(), /* return sk */
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .retval = POINTER_VALUE,
- .result = ACCEPT,
- },
- {
- "reference tracking in call: sk_ptr leak into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "Unreleased reference",
- .result = REJECT,
- },
- {
- "reference tracking in call: sk_ptr spill into caller stack",
- .insns = {
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
-
- /* subprog 1 */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
- /* spill unchecked sk_ptr into stack of caller */
- BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
- BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
- BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- /* now the sk_ptr is verified, free the reference */
- BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
-
- /* subprog 2 */
- BPF_SK_LOOKUP,
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: allow LD_ABS",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: forbid LD_ABS while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_LD_ABS(BPF_B, 0),
- BPF_LD_ABS(BPF_H, 0),
- BPF_LD_ABS(BPF_W, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: allow LD_IND",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 1,
- },
- {
- "reference tracking: forbid LD_IND while holding reference",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_7, 1),
- BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
- .result = REJECT,
- },
- {
- "reference tracking: check reference or tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: release reference then tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- BPF_SK_LOOKUP,
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 18 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: leak possible reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- /* bpf_tail_call() */
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 2),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* if (sk) bpf_sk_release() */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 16 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: leak checked reference over tail call",
- .insns = {
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
- /* Look up socket and store in REG_6 */
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- /* if (!sk) goto end */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
- /* bpf_tail_call() */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_LD_MAP_FD(BPF_REG_2, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_tail_call),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .fixup_prog1 = { 17 },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "tail_call would lead to reference leak",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock_or_null",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: mangle and release sock",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "R1 pointer arithmetic on sock prohibited",
- .result = REJECT,
- },
- {
- "reference tracking: access member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "reference tracking: write to member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_LD_IMM64(BPF_REG_2, 42),
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
- offsetof(struct bpf_sock, mark)),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LD_IMM64(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "cannot write into socket",
- .result = REJECT,
- },
- {
- "reference tracking: invalid 64-bit access of member",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_sock access off=0 size=8",
- .result = REJECT,
- },
- {
- "reference tracking: access after release",
- .insns = {
- BPF_SK_LOOKUP,
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "reference tracking: direct access for lookup",
- .insns = {
- /* Check that the packet is at least 64B long */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
- BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
- /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
- BPF_MOV64_IMM(BPF_REG_4, 0),
- BPF_MOV64_IMM(BPF_REG_5, 0),
- BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_EMIT_CALL(BPF_FUNC_sk_release),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "calls: ctx read at start of subprog",
- .insns = {
- BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
- BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_EXIT_INSN(),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .result_unpriv = REJECT,
- .result = ACCEPT,
- },
- {
- "check wire_len is not readable by sockets",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .errstr = "invalid bpf_context access",
- .result = REJECT,
- },
- {
- "check wire_len is readable by tc classifier",
- .insns = {
- BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- },
- {
- "check wire_len is not writable by tc classifier",
- .insns = {
- BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
- offsetof(struct __sk_buff, wire_len)),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
- .result = REJECT,
- },
- {
- "calls: cross frame pruning",
- .insns = {
- /* r8 = !!random();
- * call pruner()
- * if (r8)
- * do something bad;
- */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_IMM(BPF_REG_8, 0),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
- BPF_MOV64_IMM(BPF_REG_8, 1),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: functional",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- /* reg, bit 63 or bit 0 set, taken */
- BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_EXIT_INSN(),
-
- /* reg, bit 62, not taken */
- BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
- BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_EXIT_INSN(),
-
- /* imm, any bit set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
- BPF_EXIT_INSN(),
-
- /* imm, bit 31 set, taken */
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
-
- /* all good - return r0 == 2 */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .runs = 7,
- .retvals = {
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 63) | (1U << 31), }
- },
- { .retval = 2,
- .data64 = { (1ULL << 31) | (1U << 0), }
- },
- { .retval = 2,
- .data64 = { (__u32)-1, }
- },
- { .retval = 2,
- .data64 = { ~0x4000000000000000ULL, }
- },
- { .retval = 0,
- .data64 = { 0, }
- },
- { .retval = 0,
- .data64 = { ~0ULL, }
- },
- },
- },
- {
- "jset: sign-extend",
- .insns = {
- /* r0 = 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
- /* prep for direct packet access via r2 */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct __sk_buff, data)),
- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
- offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
- BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
- BPF_EXIT_INSN(),
-
- BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
-
- BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
- BPF_EXIT_INSN(),
+/* BPF_DIRECT_PKT_R2 contains 7 instructions; it initializes the default
+ * return value to 0 and does the necessary preparation for direct packet
+ * access through r2. The allowed access range is 8 bytes.
+ */
+#define BPF_DIRECT_PKT_R2 \
+ BPF_MOV64_IMM(BPF_REG_0, 0), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
+ offsetof(struct __sk_buff, data)), \
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
+ offsetof(struct __sk_buff, data_end)), \
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
+ BPF_EXIT_INSN()
+
+/* BPF_RAND_UEXT_R7 contains 4 instructions; it initializes R7 to a random
+ * positive u32 and zero-extends it to 64 bits.
+ */
+#define BPF_RAND_UEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
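+/* Note: the LSH/RSH by 33 above clears bit 31 and the upper 32 bits, so the
+ * value left in R7 is in the range [0, 2^31).
+ */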
+
+/* BPF_RAND_SEXT_R7 contains 5 instructions; it initializes R7 to a random
+ * negative u32 and sign-extends it to 64 bits.
+ */
+#define BPF_RAND_SEXT_R7 \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
+ BPF_FUNC_get_prandom_u32), \
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
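+/* Note: the OR sets bit 31, making the low 32 bits a negative value; the
+ * LSH/ARSH by 32 then sign-extends it, so R7 ends up in [-2^31, -1].
+ */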
- BPF_MOV64_IMM(BPF_REG_0, 2),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .result = ACCEPT,
- .retval = 2,
- .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
- },
- {
- "jset: known const compare",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 1),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .retval_unpriv = 1,
- .result_unpriv = ACCEPT,
- .retval = 1,
- .result = ACCEPT,
- },
- {
- "jset: known const compare bad",
- .insns = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: unknown const compare not taken",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .errstr_unpriv = "!read_ok",
- .result_unpriv = REJECT,
- .errstr = "!read_ok",
- .result = REJECT,
- },
- {
- "jset: half-known const compare",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
- {
- "jset: range",
- .insns = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
- BPF_FUNC_get_prandom_u32),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
- BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
- BPF_EXIT_INSN(),
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
- BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
- BPF_EXIT_INSN(),
- },
- .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
- .result_unpriv = ACCEPT,
- .result = ACCEPT,
- },
+static struct bpf_test tests[] = {
+#define FILL_ARRAY
+#include <verifier/tests.h>
+#undef FILL_ARRAY
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -15611,19 +347,40 @@ static int probe_filter_length(const struct bpf_insn *fp)
return len + 1;
}
-static int create_map(uint32_t type, uint32_t size_key,
- uint32_t size_value, uint32_t max_elem)
+static bool skip_unsupported_map(enum bpf_map_type map_type)
+{
+ if (!bpf_probe_map_type(map_type, 0)) {
+ printf("SKIP (unsupported map type %d)\n", map_type);
+ skips++;
+ return true;
+ }
+ return false;
+}
+
+static int __create_map(uint32_t type, uint32_t size_key,
+ uint32_t size_value, uint32_t max_elem,
+ uint32_t extra_flags)
{
int fd;
fd = bpf_create_map(type, size_key, size_value, max_elem,
- type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
- if (fd < 0)
+ (type == BPF_MAP_TYPE_HASH ?
+ BPF_F_NO_PREALLOC : 0) | extra_flags);
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create hash map '%s'!\n", strerror(errno));
+ }
return fd;
}
+static int create_map(uint32_t type, uint32_t size_key,
+ uint32_t size_value, uint32_t max_elem)
+{
+ return __create_map(type, size_key, size_value, max_elem, 0);
+}
+
static void update_map(int fd, int index)
{
struct test_val value = {
@@ -15669,6 +426,8 @@ static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
sizeof(int), max_elem, 0);
if (mfd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
+ return -1;
printf("Failed to create prog array '%s'!\n", strerror(errno));
return -1;
}
@@ -15699,15 +458,20 @@ static int create_map_in_map(void)
inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
sizeof(int), 1, 0);
if (inner_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
+ return -1;
printf("Failed to create array '%s'!\n", strerror(errno));
return inner_map_fd;
}
outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
sizeof(int), inner_map_fd, 1, 0);
- if (outer_map_fd < 0)
+ if (outer_map_fd < 0) {
+ if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
+ return -1;
printf("Failed to create array of maps '%s'!\n",
strerror(errno));
+ }
close(inner_map_fd);
@@ -15722,10 +486,112 @@ static int create_cgroup_storage(bool percpu)
fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
TEST_DATA_LEN, 0, 0);
- if (fd < 0)
+ if (fd < 0) {
+ if (skip_unsupported_map(type))
+ return -1;
printf("Failed to create cgroup storage '%s'!\n",
strerror(errno));
+ }
+
+ return fd;
+}
+
+/* struct bpf_spin_lock {
+ * int val;
+ * };
+ * struct val {
+ * int cnt;
+ * struct bpf_spin_lock l;
+ * };
+ */
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
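+/* string offsets in btf_str_sec: 1 "bpf_spin_lock", 15 "val", 19 "cnt", 23 "l" */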
+static __u32 btf_raw_types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* struct bpf_spin_lock */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
+ BTF_MEMBER_ENC(15, 1, 0), /* int val; */
+ /* struct val */ /* [3] */
+ BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
+ BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
+ BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
+};
+
+static int load_btf(void)
+{
+ struct btf_header hdr = {
+ .magic = BTF_MAGIC,
+ .version = BTF_VERSION,
+ .hdr_len = sizeof(struct btf_header),
+ .type_len = sizeof(btf_raw_types),
+ .str_off = sizeof(btf_raw_types),
+ .str_len = sizeof(btf_str_sec),
+ };
+ void *ptr, *raw_btf;
+ int btf_fd;
+
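+ /* Raw BTF image layout: header, then the type section, then the string section. */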
+ ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
+ sizeof(btf_str_sec));
+
+ memcpy(ptr, &hdr, sizeof(hdr));
+ ptr += sizeof(hdr);
+ memcpy(ptr, btf_raw_types, hdr.type_len);
+ ptr += hdr.type_len;
+ memcpy(ptr, btf_str_sec, hdr.str_len);
+ ptr += hdr.str_len;
+
+ btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
+ free(raw_btf);
+ if (btf_fd < 0)
+ return -1;
+ return btf_fd;
+}
+
+static int create_map_spin_lock(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .key_size = 4,
+ .value_size = 8,
+ .max_entries = 1,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ };
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ if (fd < 0)
+ printf("Failed to create map with spin_lock\n");
+ return fd;
+}
+static int create_sk_storage_map(void)
+{
+ struct bpf_create_map_attr attr = {
+ .name = "test_map",
+ .map_type = BPF_MAP_TYPE_SK_STORAGE,
+ .key_size = 4,
+ .value_size = 8,
+ .max_entries = 0,
+ .map_flags = BPF_F_NO_PREALLOC,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 3,
+ };
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+ attr.btf_fd = btf_fd;
+ fd = bpf_create_map_xattr(&attr);
+ close(attr.btf_fd);
+ if (fd < 0)
+ printf("Failed to create sk_storage_map\n");
return fd;
}
@@ -15747,9 +613,16 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_in_map = test->fixup_map_in_map;
int *fixup_cgroup_storage = test->fixup_cgroup_storage;
int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
-
- if (test->fill_helper)
+ int *fixup_map_spin_lock = test->fixup_map_spin_lock;
+ int *fixup_map_array_ro = test->fixup_map_array_ro;
+ int *fixup_map_array_wo = test->fixup_map_array_wo;
+ int *fixup_map_array_small = test->fixup_map_array_small;
+ int *fixup_sk_storage_map = test->fixup_sk_storage_map;
+
+ if (test->fill_helper) {
+ test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
test->fill_helper(test);
+ }
/* Allocating HTs with 1 elem is fine here, since we only test
* for verifier and not do a runtime lookup, so the only thing
@@ -15863,6 +736,49 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_stacktrace++;
} while (*fixup_map_stacktrace);
}
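+ /* As with the fixups above, each fixup array lists insn indexes whose imm
+ * field is patched with the fd of the map created here.
+ */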
+ if (*fixup_map_spin_lock) {
+ map_fds[13] = create_map_spin_lock();
+ do {
+ prog[*fixup_map_spin_lock].imm = map_fds[13];
+ fixup_map_spin_lock++;
+ } while (*fixup_map_spin_lock);
+ }
+ if (*fixup_map_array_ro) {
+ map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+ sizeof(struct test_val), 1,
+ BPF_F_RDONLY_PROG);
+ update_map(map_fds[14], 0);
+ do {
+ prog[*fixup_map_array_ro].imm = map_fds[14];
+ fixup_map_array_ro++;
+ } while (*fixup_map_array_ro);
+ }
+ if (*fixup_map_array_wo) {
+ map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+ sizeof(struct test_val), 1,
+ BPF_F_WRONLY_PROG);
+ update_map(map_fds[15], 0);
+ do {
+ prog[*fixup_map_array_wo].imm = map_fds[15];
+ fixup_map_array_wo++;
+ } while (*fixup_map_array_wo);
+ }
+ if (*fixup_map_array_small) {
+ map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
+ 1, 1, 0);
+ update_map(map_fds[16], 0);
+ do {
+ prog[*fixup_map_array_small].imm = map_fds[16];
+ fixup_map_array_small++;
+ } while (*fixup_map_array_small);
+ }
+ if (*fixup_sk_storage_map) {
+ map_fds[17] = create_sk_storage_map();
+ do {
+ prog[*fixup_sk_storage_map].imm = map_fds[17];
+ fixup_sk_storage_map++;
+ } while (*fixup_sk_storage_map);
+ }
}
static int set_admin(bool admin)
@@ -15928,6 +844,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
+ int fixup_skips;
__u32 pflags;
int i, err;
@@ -15936,8 +853,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fixup_skips = skips;
do_test_fixup(test, prog_type, prog, map_fds);
- prog_len = probe_filter_length(prog);
+ if (test->fill_insns) {
+ prog = test->fill_insns;
+ prog_len = test->prog_len;
+ } else {
+ prog_len = probe_filter_length(prog);
+ }
+ /* If there were some map skips during fixup due to missing bpf
+ * features, skip this test.
+ */
+ if (fixup_skips != skips)
+ return;
pflags = 0;
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
@@ -15945,7 +873,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
pflags |= BPF_F_ANY_ALIGNMENT;
fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
- "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
+ "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
+ if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
+ printf("SKIP (unsupported program type %d)\n", prog_type);
+ skips++;
+ goto close_fds;
+ }
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
@@ -16039,6 +972,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
goto fail_log;
}
close_fds:
+ if (test->fill_insns)
+ free(test->fill_insns);
close(fd_prog);
for (i = 0; i < MAX_NR_MAPS; i++)
close(map_fds[i]);
@@ -16099,7 +1034,7 @@ static bool test_as_unpriv(struct bpf_test *test)
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
- int i, passes = 0, errors = 0, skips = 0;
+ int i, passes = 0, errors = 0;
for (i = from; i < to; i++) {
struct bpf_test *test = &tests[i];
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 4cdb63bf0521..9a9fc6c9b70b 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -52,6 +52,10 @@ struct ksym *ksym_search(long key)
int start = 0, end = sym_cnt;
int result;
+ /* kallsyms not loaded. return NULL */
+ if (sym_cnt <= 0)
+ return NULL;
+
while (start < end) {
size_t mid = start + (end - start) / 2;
diff --git a/tools/testing/selftests/bpf/urandom_read.c b/tools/testing/selftests/bpf/urandom_read.c
index 9de8b7cb4e6d..db781052758d 100644
--- a/tools/testing/selftests/bpf/urandom_read.c
+++ b/tools/testing/selftests/bpf/urandom_read.c
@@ -7,11 +7,19 @@
#define BUF_SIZE 256
+static __attribute__((noinline))
+void urandom_read(int fd, int count)
+{
+ char buf[BUF_SIZE];
+ int i;
+
+ for (i = 0; i < count; ++i)
+ read(fd, buf, BUF_SIZE);
+}
+
int main(int argc, char *argv[])
{
int fd = open("/dev/urandom", O_RDONLY);
- int i;
- char buf[BUF_SIZE];
int count = 4;
if (fd < 0)
@@ -20,8 +28,7 @@ int main(int argc, char *argv[])
if (argc == 2)
count = atoi(argv[1]);
- for (i = 0; i < count; ++i)
- read(fd, buf, BUF_SIZE);
+ urandom_read(fd, count);
close(fd);
return 0;
diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore
new file mode 100644
index 000000000000..45984a364647
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/.gitignore
@@ -0,0 +1 @@
+tests.h
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
new file mode 100644
index 000000000000..e0fad1548737
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -0,0 +1,50 @@
+{
+ "invalid and of negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid range check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
new file mode 100644
index 000000000000..bcb83196e459
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/array_access.c
@@ -0,0 +1,378 @@
+{
+ "valid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "valid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid map access into an array with a signed variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a constant",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .result = REJECT,
+},
+{
+ "invalid map access into an array with a register",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a variable",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with no floor check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid map access into an array with a invalid max check",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3, 11 },
+ .errstr = "R0 pointer += pointer",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "valid read map access into a read-only array 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_ro = { 3 },
+ .result = ACCEPT,
+ .retval = 28,
+},
+{
+ "valid read map access into a read-only array 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_array_ro = { 3 },
+ .result = ACCEPT,
+ .retval = -29,
+},
+{
+ "invalid write map access into a read-only array 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_ro = { 3 },
+ .result = REJECT,
+ .errstr = "write into map forbidden",
+},
+{
+ "invalid write map access into a read-only array 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_load_bytes),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_array_ro = { 4 },
+ .result = REJECT,
+ .errstr = "write into map forbidden",
+},
+{
+ "valid write map access into a write-only array 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_wo = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "valid write map access into a write-only array 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_load_bytes),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_array_wo = { 4 },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "invalid read map access into a write-only array 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_wo = { 3 },
+ .result = REJECT,
+ .errstr = "read from map forbidden",
+},
+{
+ "invalid read map access into a write-only array 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_array_wo = { 3 },
+ .result = REJECT,
+ .errstr = "read from map forbidden",
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c
new file mode 100644
index 000000000000..b8d18642653a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic.c
@@ -0,0 +1,23 @@
+{
+ "empty prog",
+ .insns = {
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "only exit insn",
+ .insns = {
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "no bpf_exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
+ },
+ .errstr = "not an exit",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_call.c b/tools/testing/selftests/bpf/verifier/basic_call.c
new file mode 100644
index 000000000000..a8c6ab4c1622
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_call.c
@@ -0,0 +1,50 @@
+{
+ "invalid call insn1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 8d",
+ .result = REJECT,
+},
+{
+ "invalid call insn2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_CALL uses reserved",
+ .result = REJECT,
+},
+{
+ "invalid function call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid func unknown#1234567",
+ .result = REJECT,
+},
+{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
new file mode 100644
index 000000000000..ed91a7b9a456
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -0,0 +1,134 @@
+{
+ "add+sub+mul",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "xor32 zero extend check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_2, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "arsh32 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on imm 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -16069393,
+},
+{
+ "arsh32 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 on reg 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+ BPF_MOV64_IMM(BPF_REG_1, 15),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 43724,
+},
+{
+ "arsh64 on imm",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "arsh64 on reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "invalid 64-bit BPF_END",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ {
+ .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
+ .dst_reg = BPF_REG_0,
+ .src_reg = 0,
+ .off = 0,
+ .imm = 32,
+ },
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode d7",
+ .result = REJECT,
+},
+{
+ "mov64 src == dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "mov64 src != dst",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ // Check bounds are OK
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c
new file mode 100644
index 000000000000..b56f8117c09d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stack.c
@@ -0,0 +1,64 @@
+{
+ "stack out of bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr = "invalid indirect read from stack",
+ .result = REJECT,
+},
+{
+ "uninitialized stack2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid read from stack",
+ .result = REJECT,
+},
+{
+ "invalid fp arithmetic",
+ /* If this gets ever changed, make sure JITs can deal with it. */
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 subtraction from stack pointer",
+ .result = REJECT,
+},
+{
+ "non-invalid fp arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "misaligned read from stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
new file mode 100644
index 000000000000..7a0aab3f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c
@@ -0,0 +1,45 @@
+{
+ "invalid src register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in STX",
+ .insns = {
+ BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in ST",
+ .insns = {
+ BPF_ST_MEM(BPF_B, 14, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R14 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid src register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R12 is invalid",
+ .result = REJECT,
+},
+{
+ "invalid dst register in LDX",
+ .insns = {
+ BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R11 is invalid",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
new file mode 100644
index 000000000000..d55f476f2237
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -0,0 +1,508 @@
+{
+ "subtraction bounds (map value) variant 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT,
+},
+{
+ "subtraction bounds (map value) variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
+ BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "check subtraction on pointers for unpriv",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, 9 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R9 pointer -= pointer prohibited",
+},
+{
+ "bounds check based on zero-extended MOV",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0x0000'0000'ffff'ffff */
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check based on sign-extended MOV. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 4294967295",
+ .result = REJECT
+},
+{
+ "bounds check based on sign-extended MOV. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ /* r2 = 0xffff'ffff'ffff'ffff */
+ BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+ /* r2 = 0xfff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+ /* r0 = <oob pointer> */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ /* access to OOB pointer */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 min value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value_size=8 off=1073741825",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check based on reg_off + var_off + insn_off. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "value 1073741823",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "bounds check after truncation of non-boundary-crossing range",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ /* r2 = 0x10'0000'0000 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+ /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = [0x00, 0xff] */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after truncation of boundary-crossing range (1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after truncation of boundary-crossing range (2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0x1'0000'007f] */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0xffff'ff80, 0xffff'ffff] or
+ * [0x0000'0000, 0x0000'007f]
+ * difference to previous test: truncation via MOV32
+ * instead of ALU32.
+ */
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = [0x00, 0xff] or
+ * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+ /* r1 = 0 or
+ * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+ */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* no-op or OOB pointer computation */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ /* not actually fully unbounded, but the bound is very high */
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after wrapping 32-bit addition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ /* r1 = 0x7fff'ffff */
+ BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+ /* r1 = 0xffff'fffe */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ /* r1 = 0 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+ /* no-op */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* access at offset 0 */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT
+},
+{
+ "bounds check after shift with oversized count operand",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ /* r1 = (u32)1 << (u32)32 = ? */
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+ /* r1 = [0x0000, 0xffff] */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 max value is outside of the array range",
+ .result = REJECT
+},
+{
+ "bounds check after right shift of maybe-negative number",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = [0x00, 0xff] */
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ /* r1 = [-0x01, 0xfe] */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+ /* r1 = 0 or 0xff'ffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* r1 = 0 or 0xffff'ffff'ffff */
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+ /* computes unknown pointer, potentially OOB */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* potentially OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT
+},
+{
+ "bounds check after 32-bit right shift with 64-bit input",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ /* r1 = 2 */
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ /* r1 = 1<<32 */
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+ /* r1 = 0 (NOT 2!) */
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31),
+ /* r1 = 0xffff'fffe (NOT 0!) */
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2),
+ /* computes OOB pointer */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ /* OOB access */
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ /* exit */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .result = REJECT,
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 2147483646",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset 1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "pointer offset -1073741822",
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .result = REJECT
+},
+{
+ "bounds check map access with off+size signed 32bit overflow. test4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1000000),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "map_value pointer and 1000000000000",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
new file mode 100644
index 000000000000..1fd07a4f27ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c
@@ -0,0 +1,124 @@
+{
+ "check deducing bounds from const, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "check deducing bounds from const, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, ~0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check deducing bounds from const, 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 tried to subtract pointer from scalar",
+},
+{
+ "check deducing bounds from const, 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+ /* Marks reg as unknown. */
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
new file mode 100644
index 000000000000..9baca7a75c42
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
@@ -0,0 +1,406 @@
+{
+ "bounds checks mixing signed and unsigned, positive bounds",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_6, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 min value is negative, either use unsigned",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 7",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 8",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 9",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 10",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 11",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ /* Dead branch. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 12",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 13",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 14",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+},
+{
+ "bounds checks mixing signed and unsigned, variant 15",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_MOV64_IMM(BPF_REG_2, -6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
new file mode 100644
index 000000000000..f24d50f09dbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c
@@ -0,0 +1,44 @@
+{
+ "bpf_get_stack return R0 within range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_4, 256),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_get_stack),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
new file mode 100644
index 000000000000..9093a8f64dc6
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -0,0 +1,2030 @@
+{
+ "calls: basic sanity",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: not on unpriviledged",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: div by 0 in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_2, 0),
+ BPF_MOV32_IMM(BPF_REG_3, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: multiple ret types in subprog 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: multiple ret types in subprog 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 16 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "calls: overlapping caller/callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn is not an exit or jmp",
+ .result = REJECT,
+},
+{
+ "calls: wrong recursive calls",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: wrong src reg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: wrong off value",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "BPF_CALL uses reserved fields",
+ .result = REJECT,
+},
+{
+ "calls: jump back loop",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn 0 to 0",
+ .result = REJECT,
+},
+{
+ "calls: conditional call",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: conditional call 5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: conditional call 6",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge from insn",
+ .result = REJECT,
+},
+{
+ "calls: using r0 returned by callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+},
+{
+ "calls: using uninit r0 from callee",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee is using r1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+},
+{
+ "calls: callee using args1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "calls: callee using wrong args2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: callee using two args",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+ offsetof(struct __sk_buff, len)),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
+},
+{
+ "calls: callee changing pkt pointers",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ /* clear_all_pkt_pointers() has to walk all frames
+ * to make sure that pkt pointers in the caller
+ * are cleared when callee is calling a helper that
+ * adjusts packet size
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R6 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: ptr null check in subprog",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .fixup_map_hash_48b = { 3 },
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "calls: two calls with args",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN + TEST_DATA_LEN,
+},
+{
+ "calls: calls with stack arith",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: calls with misaligned stack access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+ .errstr = "misaligned stack access",
+ .result = REJECT,
+},
+{
+ "calls: calls control flow, jump test",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 43,
+},
+{
+ "calls: calls control flow, jump test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 43),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "jump out of range from insn 1 to 4",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad jump",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range from insn 11 to 9",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: recursive call. test2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "calls: unreachable code",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "unreachable insn 6",
+ .result = REJECT,
+},
+{
+ "calls: invalid call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: invalid call 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "invalid destination",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: jumping across function bodies. test2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "calls: call without exit",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: call into middle of other call",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "last insn",
+ .result = REJECT,
+},
+{
+ "calls: ld_abs with changing ctx data in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+ .result = REJECT,
+},
+{
+ "calls: two calls with bad fallthrough",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .errstr = "not an exit",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack read",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: two calls with stack write",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ /* write into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* read from stack frame of main prog */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: stack overflow using two frames (pre-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack overflow using two frames (post-call access)",
+ .insns = {
+ /* prog 1 */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+
+ /* prog 2 */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
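+	/* as above: stack_main=300, stack_A=300 and main+A > 512;
+	 * the only difference is that main touches its stack after the call
+	 */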
+ .errstr = "combined stack size",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+A+B) < 512
+ */
+ .result = ACCEPT,
+},
+{
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+A+B) > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+},
+{
+ "calls: stack depth check using three frames. test5",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
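+	/* main plus subprogs A..H form a chain of 9 call frames,
+	 * one more than the verifier's maximum call depth of 8
+	 */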
+ .errstr = "call stack",
+ .result = REJECT,
+},
+{
+ "calls: stack depth check in dead code",
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
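+	/* main passes r1 == 0, so the chain through B..H is never taken
+	 * at run time, but the depth check still sees the static chain
+	 * of 9 frames
+	 */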
+ .errstr = "call stack",
+ .result = REJECT,
+},
+{
+ "calls: spill into caller stack frame",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot spill",
+ .result = REJECT,
+},
+{
+ "calls: write into caller stack frame",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "calls: write into callee stack frame",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_EXIT_INSN(),
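+	/* subprog: returns a pointer into its own stack frame */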
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .errstr = "cannot return stack pointer",
+ .result = REJECT,
+},
+{
+ "calls: two calls with stack write and void return",
+ .insns = {
+ /* main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* write into stack frame of main prog */
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(), /* void return */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
+{
+ "calls: ambiguous return value",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
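+	/* subprog: when r1 == 0 the r0 = 0 assignment is skipped and
+	 * the function exits without setting a return value
+	 */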
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "allowed for root only",
+ .result_unpriv = REJECT,
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "calls: two calls that return map_value",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with bool condition",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that return map_value with incorrect bool check",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* call 3rd function twice */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* first time with fp-8 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ /* second time with fp-16 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	/* fetch second map_value_ptr from the stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ /* lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(), /* return 0 */
+ /* write map_value_ptr into stack frame of main prog */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(), /* return 1 */
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .fixup_map_hash_8b = { 23 },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -16+0 size 8",
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+	/* 8 byte write at offset 2 overruns the 8 byte map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* write map_value_ptr into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+ BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+	/* 8 byte write at offset 2 overruns the 8 byte map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test1",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 1 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = ACCEPT,
+},
+{
+ "calls: two calls that receive map_value_ptr_or_null via arg. test2",
+ .insns = {
+ /* main prog */
+ /* pass fp-16, fp-8 into a function */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ /* 1st lookup from map */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+
+ /* 2nd lookup from map */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+
+ /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ /* if arg2 == 1 do *arg1 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+ /* if arg4 == 0 do *arg3 = 0 */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+ /* fetch map_value_ptr from the stack of this function */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ /* write into map value */
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_8b = { 12, 22 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+},
+{
+ "calls: pkt_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+	/* Marking is still kept, but it is not safe in all cases. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Marking is still kept and safe here. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* now the pkt range is verified, read pkt_ptr from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* Check marking propagated. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "same insn cannot be used with different",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R4 invalid mem access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ /* spill checked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: pkt_ptr spill into caller stack 9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ /* spill unchecked pkt_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ /* don't read back pkt_ptr from stack here */
+ /* write 4 bytes into packet */
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "calls: caller stack init to zero or map_value_or_null",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ /* fetch map_value_or_null or const_zero from stack */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ /* store into map_value */
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ /* if (ctx == 0) return; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+ /* else bpf_map_lookup() and *(fp - 8) = r0 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 13 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: stack init to zero and pruning",
+ .insns = {
+	/* first make allocated_stack 16 bytes */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+	/* now fork the execution such that the false branch
+	 * of the JGT insn is verified second and skips the zero
+	 * init of the fp-8 stack slot. If stack liveness marking
+	 * is missing live_read marks from the map_lookup call
+	 * processing, then pruning will incorrectly assume
+	 * that the fp-8 stack slot was unused in the fall-through
+	 * branch and will accept the program incorrectly
+	 */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 6 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "calls: ctx read at start of subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "calls: cross frame pruning - liveness propagation",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_9, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
new file mode 100644
index 000000000000..349c0862fb4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -0,0 +1,70 @@
+{
+ "unreachable",
+ .insns = {
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "unreachable2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unreachable",
+ .result = REJECT,
+},
+{
+ "out of range jump",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "out of range jump2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "jump out of range",
+ .result = REJECT,
+},
+{
+ "loop (back-edge)",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "loop2 (back-edge)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
+{
+ "conditional loop",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "back-edge",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
new file mode 100644
index 000000000000..6d65fe3e7321
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c
@@ -0,0 +1,72 @@
+{
+ "bpf_exit with invalid return code. test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0xffffffff)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x0; 0x3)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test5",
+ .insns = {
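+	/* CGROUP_SOCK programs may only return 0 or 1 */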
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has value (0x2; 0x0)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 is not a known value (ctx)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "bpf_exit with invalid return code. test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 has unknown scalar value",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
new file mode 100644
index 000000000000..52e4c03b076b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_skb.c
@@ -0,0 +1,197 @@
+{
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=76 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=152 size=8",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
new file mode 100644
index 000000000000..97057c0a1b8a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/cgroup_storage.c
@@ -0,0 +1,220 @@
+{
+ "valid cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "valid per-cpu cgroup storage access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "fd 1 is not pointing to valid bpf_map",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=256 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid per-cpu cgroup storage access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 7),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "invalid per-cpu cgroup storage access 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_percpu_cgroup_storage = { 1 },
+ .result = REJECT,
+ .errstr = "get_local_storage() doesn't support non-zero flags",
+ .errstr_unpriv = "R2 leaks addr into helper function",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c
new file mode 100644
index 000000000000..84446dfc7c1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/const_or.c
@@ -0,0 +1,60 @@
+{
+ "constant register |= constant should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should keep constant type",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 13),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "constant register |= constant register should not bypass stack boundary checks",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_4, 24),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-48 access_size=58",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c
new file mode 100644
index 000000000000..92762c08f5e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx.c
@@ -0,0 +1,93 @@
+{
+ "context stores via ST",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ST stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "context stores via XADD",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+ BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "arithmetic ops make PTR_TO_CTX unusable",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct __sk_buff, data) -
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "pass unmodified ctx pointer to helper",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "pass modified ctx pointer to helper, 1",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_socket_cookie),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr_unpriv = "dereference of modified ctx ptr",
+ .errstr = "dereference of modified ctx ptr",
+},
+{
+ "pass modified ctx pointer to helper, 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_csum_update),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
new file mode 100644
index 000000000000..c6c69220a569
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
@@ -0,0 +1,181 @@
+{
+ "valid access family in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_ip4 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access local_port in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "valid access remote_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access local_ip6 in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid 64B read of size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "invalid read past end of SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, size) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "invalid read offset in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, family) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet read for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "direct packet write for SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
+{
+ "overlapping checks for direct packet access SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
+ offsetof(struct sk_msg_md, data)),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
+ offsetof(struct sk_msg_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
new file mode 100644
index 000000000000..b0fda2877119
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -0,0 +1,1033 @@
+{
+ "access skb fields ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "access skb fields bad1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "access skb fields bad2",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad3",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -12),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "access skb fields bad4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -13),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .errstr = "different pointers",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "invalid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+	"invalid access __sk_buff local_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "valid access __sk_buff family",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff local_ip6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "valid access __sk_buff remote_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+	"valid access __sk_buff local_port",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "invalid access of tc_classid for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of skb->mark for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->mark is not writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_index is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->priority is writeable by SK_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, priority)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet read for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "direct packet write for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "overlapping checks for direct packet access SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+},
+{
+ "check skb->mark is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check skb->tc_index is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
+{
+ "check cb access: byte",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "__sk_buff->hash, offset 0, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "__sk_buff->tc_index, offset 3, byte store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash byte load permitted 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check __sk_buff->hash, offset 0, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, hash)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->tc_index, offset 2, half store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check skb->hash half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load permitted 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 2),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned context access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw store not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check __sk_buff->ifindex dw load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, ifindex)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+},
+{
+ "check out of range skb->cb access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 256),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+},
+{
+ "write skb fields from socket prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+},
+{
+ "write skb fields from tc_cls_act prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check skb->data half load not permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "read gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "write gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access off=164 size=4",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "read gso_segs from CLS",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c
new file mode 100644
index 000000000000..50a8a63be4ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/dead_code.c
@@ -0,0 +1,159 @@
+{
+ "dead code: start",
+ .insns = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: mid 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "dead code: end 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: end 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+ },
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: tail of main + two functions",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: function in the middle and mid of another func",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 12),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 7),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 7,
+},
+{
+ "dead code: middle of main before call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "dead code: start of a function",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
new file mode 100644
index 000000000000..d5c596fdc4b9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -0,0 +1,655 @@
+{
+ "pkt_end - pkt_start is allowed",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = TEST_DATA_LEN,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+ BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access off=76",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+},
+{
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test9 (double test, variant 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_IMM(BPF_REG_5, 12),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test15 (spill with xadd)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_5, 4096),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test16 (arith on data_end)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test17 (pruning, alignment)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_A(-6),
+ },
+ .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "direct packet access: test18 (imm += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test19 (imm += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test20 (x += pkt_ptr, 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test21 (x += pkt_ptr, 2)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test22 (x += pkt_ptr, 3)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test23 (x += pkt_ptr, 4)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test24 (x += pkt_ptr, 5)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 64),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "direct packet access: test25 (marking on <, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test26 (marking on <, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test27 (marking on <=, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "direct packet access: test28 (marking on <=, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -4),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "direct packet access: test29 (reg > pkt_end in subprog)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
new file mode 100644
index 000000000000..698e3779fdd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c
@@ -0,0 +1,40 @@
+{
+ "direct stack access with 32-bit wraparound. test1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 2147483647",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer and 1073741823",
+ .result = REJECT
+},
+{
+ "direct stack access with 32-bit wraparound. test3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fp pointer offset 1073741822",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = REJECT
+},
diff --git a/tools/testing/selftests/bpf/verifier/direct_value_access.c b/tools/testing/selftests/bpf/verifier/direct_value_access.c
new file mode 100644
index 000000000000..b9fb28e8e224
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/direct_value_access.c
@@ -0,0 +1,347 @@
+{
+ "direct map access, write test 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 40),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "R1 min value is outside of the array range",
+},
+{
+ "direct map access, write test 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, -1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 4, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "direct value offset of 4294967295 is not allowed",
+},
+{
+ "direct map access, write test 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, -1, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 4242),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value pointer",
+},
+{
+ "direct map access, write test 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 48),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value pointer",
+},
+{
+ "direct map access, write test 12",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "direct value offset of 536870912 is not allowed",
+},
+{
+ "direct map access, write test 13",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, (1<<29)-1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value pointer, value_size=48 off=536870911",
+},
+{
+ "direct map access, write test 14",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 47),
+ BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+ BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1, 3 },
+ .result = ACCEPT,
+ .retval = 0xff,
+},
+{
+ "direct map access, write test 15",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+ BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+ BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1, 3 },
+ .result = ACCEPT,
+ .retval = 0xffff,
+},
+{
+ "direct map access, write test 16",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+ BPF_LD_MAP_VALUE(BPF_REG_2, 0, 47),
+ BPF_ST_MEM(BPF_H, BPF_REG_2, 0, 0xffff),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1, 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=48 off=47 size=2",
+},
+{
+ "direct map access, write test 17",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 46),
+ BPF_LD_MAP_VALUE(BPF_REG_2, 0, 46),
+ BPF_ST_MEM(BPF_H, BPF_REG_2, 1, 0xffff),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1, 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=48 off=47 size=2",
+},
+{
+ "direct map access, write test 18",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+ BPF_ST_MEM(BPF_H, BPF_REG_1, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_small = { 1 },
+ .result = REJECT,
+ .errstr = "R1 min value is outside of the array range",
+},
+{
+ "direct map access, write test 19",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_small = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "direct map access, write test 20",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_small = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to map value pointer",
+},
+{
+ "direct map access, invalid insn test 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, 1, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+ "direct map access, invalid insn test 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 1, 0, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+ "direct map access, invalid insn test 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, 0, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+ "direct map access, invalid insn test 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, 0, ~0, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+ "direct map access, invalid insn test 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_VALUE, ~0, ~0, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+ "direct map access, invalid insn test 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_LD_IMM64 uses reserved fields",
+},
+{
+ "direct map access, invalid insn test 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, ~0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+ "direct map access, invalid insn test 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, ~0, ~0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid bpf_ld_imm64 insn",
+},
+{
+ "direct map access, invalid insn test 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0, 0, 47),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = REJECT,
+ .errstr = "unrecognized bpf_ld_imm64 insn",
+},
diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c
new file mode 100644
index 000000000000..7685edfbcf71
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div0.c
@@ -0,0 +1,184 @@
+{
+ "DIV32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD32 by 0, zero check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "MOD64 by 0, zero check",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "DIV32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 2),
+ BPF_MOV32_IMM(BPF_REG_2, 16),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 8,
+},
+{
+ "DIV32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 by 0, zero check, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 by 0, zero check ok, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 42),
+ BPF_MOV32_IMM(BPF_REG_1, 3),
+ BPF_MOV32_IMM(BPF_REG_2, 5),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD32 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD32 by 0, zero check 2, cls",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 by 0, zero check 1, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, 2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "MOD64 by 0, zero check 2, cls",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_0, -1),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = -1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c
new file mode 100644
index 000000000000..acab4f00819f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/div_overflow.c
@@ -0,0 +1,110 @@
+/* Just make sure that JITs use udiv/umod, as otherwise we get
+ * an exception from INT_MIN/-1 overflow, similarly to div
+ * by zero.
+ */
+{
+ "DIV32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "DIV64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, LLONG_MIN),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "MOD32 overflow, check 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD32 overflow, check 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = INT_MIN,
+},
+{
+ "MOD64 overflow, check 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "MOD64 overflow, check 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, LLONG_MIN),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
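
As a sanity check of the retvals expected above (not part of the patch itself), here is a minimal userspace sketch of the unsigned div/mod semantics these overflow tests rely on; values and expected results are taken directly from the test cases, everything else is illustrative:

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* BPF_DIV/BPF_MOD are unsigned, so INT_MIN / -1 cannot trap:
	 * -1 is treated as UINT32_MAX (or UINT64_MAX for ALU64).
	 */
	uint32_t a32 = (uint32_t)INT_MIN, b32 = (uint32_t)-1;
	uint64_t a64 = (uint64_t)LLONG_MIN, b64 = (uint64_t)-1;

	assert(a32 / b32 == 0);                   /* DIV32 overflow -> retval 0 */
	assert(a32 % b32 == (uint32_t)INT_MIN);   /* MOD32 overflow -> retval INT_MIN */
	assert(a64 / b64 == 0);                   /* DIV64 overflow -> R2 becomes 0 */
	assert(a64 % b64 == (uint64_t)LLONG_MIN); /* MOD64 overflow -> dividend unchanged */

	printf("unsigned div/mod semantics hold\n");
	return 0;
}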
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
new file mode 100644
index 000000000000..1f39d845c64f
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -0,0 +1,614 @@
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND, zero included",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, bounds + offset",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid stack type R1 off=-64 access_size=65",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ /* because max wasn't checked, signed min is negative */
+ .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP, no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: stack, JMP (signed), no min check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, correct bounds",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: map adjusted, JMP, wrong max",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 0 /* csum_diff of 64-byte packet */,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes leak",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack off -64+32 size 64",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to variable memory: 8 bytes no leak (init memory)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
new file mode 100644
index 000000000000..ae54587e9829
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_packet_access.c
@@ -0,0 +1,460 @@
+{
+ "helper access to packet: test1, valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test2, unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test3, variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test4, packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test5, packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 7 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test14, cls helper ok sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test15, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test16, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test17, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test18, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R2 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test19, cls helper range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test20, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "helper access to packet: test21, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c
new file mode 100644
index 000000000000..7572e403ddb9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c
@@ -0,0 +1,953 @@
+{
+ "helper access to map: full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: negative range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=0",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const imm): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): out-of-bound range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (> adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via const reg): negative range (< adjustment)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, -1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R2 min value is negative",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): full range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) - offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): partial range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): empty range",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 min value is outside of the array range",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R1 unbounded memory access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2,
+ sizeof(struct test_val) -
+ offsetof(struct test_val, foo) + 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "invalid access to map value, value_size=48 off=4 size=45",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using <=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 unbounded memory access",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, good access 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "helper access to map: bounds check using s<=, bad access",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 min value is negative",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map lookup helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 8 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map update helper access to map: wrong size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .fixup_map_hash_16b = { 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=8 off=0 size=16",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const imm): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 9 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=12 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via const reg): out-of-bound 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, -4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=-4 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): no max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 10 },
+ .result = REJECT,
+ .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "map helper access to adjusted map (via variable): wrong max check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 3, 11 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=16 off=9 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/int_ptr.c b/tools/testing/selftests/bpf/verifier/int_ptr.c
new file mode 100644
index 000000000000..ca3b4729df66
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/int_ptr.c
@@ -0,0 +1,160 @@
+{
+ "ARG_PTR_TO_LONG uninitialized",
+ .insns = {
+ /* bpf_strtoul arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* bpf_strtoul arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* bpf_strtoul arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* bpf_strtoul arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ /* bpf_strtoul() */
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+ .errstr = "invalid indirect read from stack off -16+0 size 8",
+},
+{
+ "ARG_PTR_TO_LONG half-uninitialized",
+ .insns = {
+ /* bpf_strtoul arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* bpf_strtoul arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* bpf_strtoul arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* bpf_strtoul arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ /* bpf_strtoul() */
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+ .errstr = "invalid indirect read from stack off -16+4 size 8",
+},
+{
+ "ARG_PTR_TO_LONG misaligned",
+ .insns = {
+ /* bpf_strtoul arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* bpf_strtoul arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* bpf_strtoul arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* bpf_strtoul arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -12),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ /* bpf_strtoul() */
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-20+0 size 8",
+},
+{
+ "ARG_PTR_TO_LONG size < sizeof(long)",
+ .insns = {
+ /* bpf_strtoul arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* bpf_strtoul arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* bpf_strtoul arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* bpf_strtoul arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 12),
+ BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ /* bpf_strtoul() */
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+ .errstr = "invalid stack type R4 off=-4 access_size=8",
+},
+{
+ "ARG_PTR_TO_LONG initialized",
+ .insns = {
+ /* bpf_strtoul arg1 (buf) */
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0x00303036),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+
+ /* bpf_strtoul arg2 (buf_len) */
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+
+ /* bpf_strtoul arg3 (flags) */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+
+ /* bpf_strtoul arg4 (res) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_7),
+
+ /* bpf_strtoul() */
+ BPF_EMIT_CALL(BPF_FUNC_strtoul),
+
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c
new file mode 100644
index 000000000000..c33adf344fae
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jit.c
@@ -0,0 +1,107 @@
+{
+ "jit: lsh, rsh, arsh by 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 0xff),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: mov32 for ldimm64, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: various mul tests",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
+ BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
+ BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
+ BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jit: jsgt, jslt",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0x80000000ULL),
+ BPF_LD_IMM64(BPF_REG_2, 0x0ULL),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_JMP_REG(BPF_JSLT, BPF_REG_2, BPF_REG_1, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
new file mode 100644
index 000000000000..f0961c58581e
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -0,0 +1,746 @@
+{
+ "jset32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ /* reg, high bits shouldn't be tested */
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1ULL << 63, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jeq32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001),
+ BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jeq32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jne32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 2,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ { .retval = 2,
+ .data64 = { 2, }
+ },
+ { .retval = 2,
+ .data64 = { 1ULL << 63 | 2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jne32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "jge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 2, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jlt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { INT_MAX - 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { INT_MAX | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { UINT_MAX, }
+ },
+ { .retval = 2,
+ .data64 = { (INT_MAX - 1) | 3ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jlt32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsge32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff | 1ULL << 32, }
+ },
+ { .retval = 0,
+ .data64 = { -2, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsge32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsgt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 2,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 0,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 0,
+ .data64 = { 0x1ffffffffULL, }
+ },
+ { .retval = 2,
+ .data64 = { 0x7fffffff, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsgt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jsle32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 2,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jsle32: min/max deduction",
+ .insns = {
+ BPF_RAND_UEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jslt32: BPF_K",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (__u32)-2, }
+ },
+ { .retval = 0,
+ .data64 = { -1, }
+ },
+ { .retval = 0,
+ .data64 = { 1, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: BPF_X",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 3,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { 0x7ffffffe, }
+ },
+ { .retval = 2,
+ .data64 = { 0xffffffff, }
+ },
+ { .retval = 0,
+ .data64 = { 0x7fffffff | 2ULL << 32, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jslt32: min/max deduction",
+ .insns = {
+ BPF_RAND_SEXT_R7,
+ BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2),
+ BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32),
+ BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
new file mode 100644
index 000000000000..8dcd4e0383d5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -0,0 +1,167 @@
+{
+ "jset: functional",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ /* reg, bit 63 or bit 0 set, taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+
+ /* reg, bit 62, not taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, any bit set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, bit 31 set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ /* all good - return r0 == 2 */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 7,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 2,
+ .data64 = { ~0x4000000000000000ULL, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ { .retval = 0,
+ .data64 = { ~0ULL, }
+ },
+ },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: sign-extend",
+ .insns = {
+ BPF_DIRECT_PKT_R2,
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "jset: known const compare",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .retval_unpriv = 1,
+ .result_unpriv = ACCEPT,
+ .retval = 1,
+ .result = ACCEPT,
+},
+{
+ "jset: known const compare bad",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: unknown const compare not taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "jset: half-known const compare",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
+{
+ "jset: range",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c
new file mode 100644
index 000000000000..6f951d1ff0a4
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/jump.c
@@ -0,0 +1,375 @@
+{
+ "jump test 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 14),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 19),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 15),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 11),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 24 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = -ENOENT,
+},
+{
+ "jump test 4",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "jump test 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -20),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "jump test 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -20),
+ },
+ .result = ACCEPT,
+ .retval = 3,
+},
+{
+ "jump test 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_1, 16),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -20),
+ },
+ .result = ACCEPT,
+ .retval = 3,
+},
+{
+ "jump/call test 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "jump out of range from insn 1 to 4",
+},
+{
+ "jump/call test 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 16),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -20),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "last insn is not an exit or jmp",
+},
+{
+ "jump/call test 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 2, 26),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -31),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 3,
+},
diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c
new file mode 100644
index 000000000000..89d690f1992a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/junk_insn.c
@@ -0,0 +1,45 @@
+{
+ "junk insn",
+ .insns = {
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode 00",
+ .result = REJECT,
+},
+{
+ "junk insn2",
+ .insns = {
+ BPF_RAW_INSN(1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_LDX uses reserved fields",
+ .result = REJECT,
+},
+{
+ "junk insn3",
+ .insns = {
+ BPF_RAW_INSN(-1, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn4",
+ .insns = {
+ BPF_RAW_INSN(-1, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "unknown opcode ff",
+ .result = REJECT,
+},
+{
+ "junk insn5",
+ .insns = {
+ BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "BPF_ALU uses reserved fields",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c
new file mode 100644
index 000000000000..f6599d2ec22d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_abs.c
@@ -0,0 +1,286 @@
+{
+ "ld_abs: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_abs: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_W, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "ld_abs: tests on r6 and skb data reload helper",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42 /* ultimate return value */,
+},
+{
+ "ld_abs: invalid op 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_DW, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: invalid op 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_IND(BPF_DW, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "unknown opcode",
+},
+{
+ "ld_abs: nmap reduced",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26),
+ BPF_MOV32_IMM(BPF_REG_0, 18),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64),
+ BPF_LD_IND(BPF_W, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60),
+ BPF_MOV32_IMM(BPF_REG_0, 280971478),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15),
+ BPF_LD_ABS(BPF_H, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13),
+ BPF_MOV32_IMM(BPF_REG_0, 22),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56),
+ BPF_LD_IND(BPF_H, BPF_REG_7, 14),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52),
+ BPF_MOV32_IMM(BPF_REG_0, 17366),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV32_IMM(BPF_REG_0, 256),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 256,
+},
+{
+ "ld_abs: div + abs, test 1",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
+{
+ "ld_abs: div + abs, test 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0),
+ BPF_LD_ABS(BPF_B, 128),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+ BPF_LD_IND(BPF_B, BPF_REG_8, -70),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 3),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: div + abs, test 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_LD_ABS(BPF_B, 256),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ld_abs: vlan + abs, test 1",
+ .insns = { },
+ .data = {
+ 0x34,
+ },
+ .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 0xbef,
+},
+{
+ "ld_abs: vlan + abs, test 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_vlan_push),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .data = {
+ 0x34,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "ld_abs: jump around ld_abs",
+ .insns = { },
+ .data = {
+ 10, 11,
+ },
+ .fill_helper = bpf_fill_jump_around_ld_abs,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_dw.c b/tools/testing/selftests/bpf/verifier/ld_dw.c
new file mode 100644
index 000000000000..0f18e62f0099
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_dw.c
@@ -0,0 +1,45 @@
+{
+ "ld_dw: xor semi-random 64 bit imms, test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 4090,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2047,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 3",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 511,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 4",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 5,
+},
+{
+ "ld_dw: xor semi-random 64 bit imms, test 5",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1000000 - 6,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
new file mode 100644
index 000000000000..3856dba733e9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -0,0 +1,154 @@
+{
+ "test1 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test2 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid BPF_LD_IMM insn",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
+{
+ "test3 ld_imm64",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test4 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test5 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test6 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "test7 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "test8 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "uses reserved fields",
+ .result = REJECT,
+},
+{
+ "test9 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test10 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test11 ld_imm64",
+ .insns = {
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test12 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "test13 ld_imm64",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_ld_imm64 insn",
+ .result = REJECT,
+},
+{
+ "test14 ld_imm64: reject 2nd imm != 0",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_1,
+ BPF_PSEUDO_MAP_FD, 0, 0),
+ BPF_RAW_INSN(0, 0, 0, 0, 0xfefefe),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 1 },
+ .errstr = "unrecognized bpf_ld_imm64 insn",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c
new file mode 100644
index 000000000000..079734227538
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ld_ind.c
@@ -0,0 +1,72 @@
+{
+ "ld_ind: check calling conv, r1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_5, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R5 !read_ok",
+ .result = REJECT,
+},
+{
+ "ld_ind: check calling conv, r7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c
new file mode 100644
index 000000000000..d6eec17f2cd2
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c
@@ -0,0 +1,67 @@
+{
+ "leak pointer into ctx 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 2 },
+ .errstr_unpriv = "R2 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R1 ctx is not allowed",
+},
+{
+ "leak pointer into ctx 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R2 leaks addr into ctx",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "leak pointer into map val",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr_unpriv = "R6 leaks addr into mem",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c
new file mode 100644
index 000000000000..2cab6a3966bb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/lwt.c
@@ -0,0 +1,189 @@
+{
+ "invalid direct packet write for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "invalid direct packet write for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "cannot write into packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet write for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "direct packet read for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "direct packet read for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_OUT,
+},
+{
+ "direct packet read for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "overlapping checks for direct packet access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "make headroom for LWT_XMIT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ /* split for s390 to succeed */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+},
+{
+ "invalid access of tc_classid for LWT_IN",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_OUT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "invalid access of tc_classid for LWT_XMIT",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+},
+{
+ "check skb->tc_classid half load not permitted for lwt prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid) + 2),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c
new file mode 100644
index 000000000000..2798927ee9ff
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_in_map.c
@@ -0,0 +1,62 @@
+{
+ "map in map access",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .result = ACCEPT,
+},
+{
+ "invalid inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 pointer arithmetic on map_ptr prohibited",
+ .result = REJECT,
+},
+{
+ "forgot null checking on the inner map pointer",
+ .insns = {
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(0, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+ .errstr = "R1 type=map_value_or_null expected=map_ptr",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
new file mode 100644
index 000000000000..cd26ee6b7b1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
@@ -0,0 +1,100 @@
+{
+ "calls: two calls returning different map pointers for lookup (hash, array)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_hash_48b = { 13 },
+ .fixup_map_array_48b = { 16 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "calls: two calls returning different map pointers for lookup (hash, map in map)",
+ .insns = {
+ /* main prog */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_CALL_REL(11),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_CALL_REL(12),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* subprog 1 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* subprog 2 */
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_in_map = { 16 },
+ .fixup_map_array_48b = { 13 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'map_ptr'",
+},
+{
+ "cond: two branches returning different map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 5 },
+ .fixup_prog2 = { 2 },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "tail_call abusing map_ptr",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "cond: two branches returning same map pointers for lookup (tail, tail)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 2, 5 },
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ .retval = 42,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c
new file mode 100644
index 000000000000..bdd0e8d18333
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_ret_val.c
@@ -0,0 +1,65 @@
+{
+ "invalid map_fd for function call",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "fd 0 is not pointing to valid bpf_map",
+ .result = REJECT,
+},
+{
+ "don't check return value before access",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access 'map_value_or_null'",
+ .result = REJECT,
+},
+{
+ "access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "misaligned value access",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
+{
+ "sometimes access memory with incorrect alignment",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R0 leaks addr",
+ .result = REJECT,
+ .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c
new file mode 100644
index 000000000000..6e1358c544fd
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/masking.c
@@ -0,0 +1,322 @@
+{
+ "masking, test out of bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 9",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 10",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0xffffffff),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 11",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test out of bounds 12",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 1",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 4),
+ BPF_MOV32_IMM(BPF_REG_2, 5 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 4,
+},
+{
+ "masking, test in bounds 2",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 3",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe),
+ BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfffffffe,
+},
+{
+ "masking, test in bounds 4",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0xabcde),
+ BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xabcde,
+},
+{
+ "masking, test in bounds 5",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_MOV32_IMM(BPF_REG_2, 1 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "masking, test in bounds 6",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 46),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 7",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -46),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 46,
+},
+{
+ "masking, test in bounds 8",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -47),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1),
+ BPF_MOV32_IMM(BPF_REG_2, 47 - 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63),
+ BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
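
The masking tests above all exercise the same bounds-masking arithmetic used to sanitize pointer arithmetic against speculative out-of-bounds access: mask = arsh64(-((bound - idx) | idx), 63); idx &= mask keeps idx when 0 <= idx <= bound and clears it to 0 otherwise. A minimal user-space sketch of the same arithmetic (not part of the patch; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* mirrors the BPF_SUB/BPF_OR/BPF_NEG/BPF_ARSH/BPF_AND sequence above */
static uint64_t mask_index(uint64_t idx, uint64_t bound)
{
	uint64_t m = bound - idx;	/* bit 63 set iff idx > bound */

	m |= idx;			/* keep bit 63 for the out-of-bounds case */
	m = -m;				/* sign flips: set for in-bounds, clear for out-of-bounds */
	m = (uint64_t)((int64_t)m >> 63);	/* all-ones if in bounds, 0 if not */
	return idx & m;
}

int main(void)
{
	/* same vectors as "out of bounds 1", "in bounds 1" and "in bounds 6" */
	printf("%llu\n", (unsigned long long)mask_index(5, 5 - 1));	/* 0 */
	printf("%llu\n", (unsigned long long)mask_index(4, 5 - 1));	/* 4 */
	printf("%llu\n", (unsigned long long)mask_index(46, 47 - 1));	/* 46 */
	return 0;
}
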
diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c
new file mode 100644
index 000000000000..205292b8dd65
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/meta_access.c
@@ -0,0 +1,235 @@
+{
+ "meta access, test1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet, off=-8",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test4",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test5",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3),
+ BPF_MOV64_IMM(BPF_REG_2, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 !read_ok",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test6",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test7",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test8",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test9",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test10",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test11",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_IMM(BPF_REG_5, 42),
+ BPF_MOV64_IMM(BPF_REG_6, 24),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "meta access, test12",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
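
The meta access tests check how XDP programs may touch the data_meta area: a load through the meta pointer is only allowed once the program has proven meta + len <= data, and helpers such as bpf_xdp_adjust_meta() invalidate previously derived packet pointers. A hedged restricted-C sketch of the accepted shape (not part of the patch; assumes clang -target bpf and libbpf's <bpf/bpf_helpers.h>, section name illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int read_meta(struct xdp_md *ctx)
{
	void *meta = (void *)(long)ctx->data_meta;
	void *data = (void *)(long)ctx->data;

	/* comparable to "meta access, test1": prove meta + 8 <= data
	 * before dereferencing the meta pointer */
	if (meta + 8 > data)
		return XDP_PASS;

	return *(volatile __u8 *)meta ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";
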
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
new file mode 100644
index 000000000000..471c1a5950d8
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
@@ -0,0 +1,59 @@
+{
+ "check bpf_perf_event_data->sample_period byte load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 7),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period half load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 6),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period word load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+#else
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period) + 4),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "check bpf_perf_event_data->sample_period dword load permitted",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
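
The sample_period tests permit partial loads of the 64-bit field; the #if blocks pick the offset of the low-order byte, halfword or word, which sits at the start of the field on little-endian and at the end on big-endian. A small user-space sketch of that offset arithmetic (not part of the patch):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t sample_period = 0x1122334455667788ULL;
	unsigned char raw[sizeof(sample_period)];

	memcpy(raw, &sample_period, sizeof(raw));
#if __BYTE_ORDER == __LITTLE_ENDIAN
	/* least significant byte at offset 0, as in the BPF_B load above */
	printf("lsb at +0: %#x\n", raw[0]);
#else
	/* least significant byte at offset 7, hence the "+ 7" above */
	printf("lsb at +7: %#x\n", raw[7]);
#endif
	return 0;
}
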
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
new file mode 100644
index 000000000000..bbdba990fefb
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -0,0 +1,74 @@
+{
+ "prevent map lookup in sockmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in sockhash",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_sockhash = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "prevent map lookup in xskmap",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "prevent map lookup in stack trace",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_stacktrace = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "prevent map lookup in prog array",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog2 = { 3 },
+ .result = REJECT,
+ .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem",
+},
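
These tests pin down which map types may not be passed to bpf_map_lookup_elem() from a program at the time of this series (sockmap, sockhash, xskmap, stack trace, prog array). A hedged restricted-C sketch of the rejected shape (not part of the patch; the BTF-style map definition, map name and section name are assumptions about the build environment):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_map SEC(".maps");

SEC("sockops")
int try_lookup(struct bpf_sock_ops *ops)
{
	int key = 0;

	/* expected to fail verification with
	 * "cannot pass map_type 15 into func bpf_map_lookup_elem" */
	if (bpf_map_lookup_elem(&sock_map, &key))
		return 1;
	return 0;
}

char _license[] SEC("license") = "GPL";
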
diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c
new file mode 100644
index 000000000000..193d9e87d5a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_stack.c
@@ -0,0 +1,305 @@
+{
+ "raw_stack: no skb_load_bytes",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ /* Call to skb_load_bytes() omitted. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid read from stack off -8+0 size 8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, no init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, init",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs around bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs corruption 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R3 invalid mem access 'inv'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "raw_stack: skb_load_bytes, spilled regs + data",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+ offsetof(struct __sk_buff, priority)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-513 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-1 access_size=8",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 min value is negative",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, invalid access 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3 off=-512 access_size=0",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "raw_stack: skb_load_bytes, large access",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 512),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
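
The raw_stack tests feed a stack buffer to bpf_skb_load_bytes(), whose destination may point at uninitialized ("raw") stack; the pointer/size pair must still stay inside the 512-byte stack and the size must not be zero, negative or unbounded. A hedged restricted-C counterpart of the accepted "no init" case (not part of the patch; assumes libbpf's <bpf/bpf_helpers.h>, section name illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_bytes(struct __sk_buff *skb)
{
	__u8 buf[8];	/* may stay uninitialized: the helper fills it */

	/* constant offset and length keep the stack access provably
	 * inside [fp-8, fp), which is what the verifier checks above */
	if (bpf_skb_load_bytes(skb, 4, buf, sizeof(buf)) < 0)
		return 0;

	return buf[0];
}

char _license[] SEC("license") = "GPL";
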
diff --git a/tools/testing/selftests/bpf/verifier/raw_tp_writable.c b/tools/testing/selftests/bpf/verifier/raw_tp_writable.c
new file mode 100644
index 000000000000..95b5d70a1dc1
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/raw_tp_writable.c
@@ -0,0 +1,34 @@
+{
+ "raw_tracepoint_writable: reject variable offset",
+ .insns = {
+ /* r6 is our tp buffer */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ /* move the key (== 0) to r10-8 */
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ /* lookup in the map */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+
+ /* exit clean if null */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* shift the buffer pointer to a variable location */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0),
+ /* clobber whatever's there */
+ BPF_MOV64_IMM(BPF_REG_7, 4242),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, 0),
+
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1, },
+ .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ .errstr = "R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
new file mode 100644
index 000000000000..ebcbf154c460
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -0,0 +1,823 @@
+{
+ "reference tracking: leak potential reference",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference to sock_common",
+ .insns = {
+ BPF_SK_LOOKUP(skc_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak potential reference on stack 2",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: zero potential reference",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: zero potential reference to sock_common",
+ .insns = {
+ BPF_SK_LOOKUP(skc_lookup_tcp),
+ BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: copy and zero potential references",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference without check",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_or_null expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference to sock_common without check",
+ .insns = {
+ BPF_SK_LOOKUP(skc_lookup_tcp),
+ /* reference in r0 may be NULL */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=sock_common_or_null expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference to sock_common",
+ .insns = {
+ BPF_SK_LOOKUP(skc_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference 2",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference twice",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: release reference twice inside branch",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: alloc, check, free in one subbranch",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
+ /* Leak reference in R0 */
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking: alloc, check, free in both subbranches",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
+ /* if (offsetof(skb, mark) > data_len) exit; */
+ BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
+ offsetof(struct __sk_buff, mark)),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "reference tracking in call: free reference in subprog",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: free reference in subprog and outside",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "type=inv expected=sock",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc & leak reference in subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: alloc in subprog, release outside",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_EXIT_INSN(), /* return sk */
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = POINTER_VALUE,
+ .result = ACCEPT,
+},
+{
+ "reference tracking in call: sk_ptr leak into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "Unreleased reference",
+ .result = REJECT,
+},
+{
+ "reference tracking in call: sk_ptr spill into caller stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+
+ /* subprog 1 */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+ /* spill unchecked sk_ptr into stack of caller */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ /* now the sk_ptr is verified, free the reference */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+
+ /* subprog 2 */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: allow LD_ABS",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: forbid LD_ABS while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_LD_ABS(BPF_H, 0),
+ BPF_LD_ABS(BPF_W, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: allow LD_IND",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "reference tracking: forbid LD_IND while holding reference",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_7, 1),
+ BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
+ .result = REJECT,
+},
+{
+ "reference tracking: check reference or tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: release reference then tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 18 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: leak possible reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ /* bpf_tail_call() */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* if (sk) bpf_sk_release() */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 16 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: leak checked reference over tail call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ /* Look up socket and store in REG_6 */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ /* if (!sk) goto end */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ /* bpf_tail_call() */
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 17 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "tail_call would lead to reference leak",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock_or_null",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: mangle and release sock",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "R1 pointer arithmetic on sock prohibited",
+ .result = REJECT,
+},
+{
+ "reference tracking: access member",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: write to member",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LD_IMM64(BPF_REG_2, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
+ offsetof(struct bpf_sock, mark)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LD_IMM64(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "cannot write into sock",
+ .result = REJECT,
+},
+{
+ "reference tracking: invalid 64-bit access of member",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid sock access off=0 size=8",
+ .result = REJECT,
+},
+{
+ "reference tracking: access after release",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "!read_ok",
+ .result = REJECT,
+},
+{
+ "reference tracking: direct access for lookup",
+ .insns = {
+ /* Check that the packet is at least 64B long */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
+ /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
+ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: use ptr from bpf_tcp_sock() after release",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_sk_fullsock() after release",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use sk after bpf_sk_release(tp)",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
+{
+ "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "reference tracking: bpf_sk_release(listen_sk)",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+ "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+},
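
The reference tracking tests all follow from one rule: a socket returned by sk_lookup_tcp()/skc_lookup_tcp() holds a reference that must be NULL-checked and released on every path, must not be mangled by pointer arithmetic or leaked across a tail call, and pointers derived from it via bpf_tcp_sock()/bpf_sk_fullsock() become invalid once the reference is released. A hedged restricted-C sketch of the accepted pattern (not part of the patch; assumes libbpf's <bpf/bpf_helpers.h>, tuple contents and section name illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int lookup_and_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;

	/* acquires a reference, or returns NULL */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
	if (!sk)
		return 0;

	/* read-only use of sk is fine here; writes and arithmetic are not */

	/* every acquired reference must be released on every path */
	bpf_sk_release(sk);
	return 0;
}

char _license[] SEC("license") = "GPL";
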
diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c
new file mode 100644
index 000000000000..a9a8f620e71c
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c
@@ -0,0 +1,80 @@
+{
+ "runtime/jit: tail_call within bounds, prog once",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "runtime/jit: tail_call within bounds, prog loop",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 41,
+},
+{
+ "runtime/jit: tail_call within bounds, no prog",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 2),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "runtime/jit: tail_call out of bounds",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 256),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass negative index to tail_call",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, -1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "runtime/jit: pass > 32bit index to tail_call",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 2 },
+ .result = ACCEPT,
+ .retval = 42,
+ /* Verifier rewrite for unpriv skips tail call here. */
+ .retval_unpriv = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/scale.c b/tools/testing/selftests/bpf/verifier/scale.c
new file mode 100644
index 000000000000..7f868d4802e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/scale.c
@@ -0,0 +1,18 @@
+{
+ "scale: scale test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_scale,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "scale: scale test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_scale,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
new file mode 100644
index 000000000000..7e50cb80873a
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -0,0 +1,156 @@
+{
+ "pointer/scalar confusion in state equality check (way 1)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "pointer/scalar confusion in state equality check (way 2)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_JMP_A(1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr as return value"
+},
+{
+ "liveness pruning and write screening",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* branch conditions teach us nothing about R2 */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
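+ /* If both conditional jumps above are taken, R0 is never written
+ * before the exit below; the expected "R0 !read_ok" error flags
+ * exactly that path.
+ */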
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "varlen_map_value_access pruning",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .errstr = "R0 unbounded memory access",
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "search pruning: all branches should be verified (nop operation)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_A(1),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "R6 invalid mem access 'inv'",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "search pruning: all branches should be verified (invalid stack access)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+ BPF_JMP_A(1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid read from stack off -16+0 size 8",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "allocated_stack",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+ BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+ .insn_processed = 15,
+},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
new file mode 100644
index 000000000000..b31cd2cf50d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -0,0 +1,500 @@
+{
+ "skb->sk: no NULL check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_common_or_null'",
+},
+{
+ "skb->sk: sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "skb->sk: sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock_common access",
+},
+{
+ "bpf_sk_fullsock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "sk_fullsock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'sock_or_null'",
+},
+{
+ "sk_fullsock(skb->sk): sk->type [fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->family [non fullsock field]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->state [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_port [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->type [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): sk->protocol [narrow load]",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "sk_fullsock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, state)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid sock access",
+},
+{
+ "bpf_tcp_sock(skb->sk): no !skb->sk check",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "type=sock_common_or_null expected=sock_common",
+},
+{
+ "bpf_tcp_sock(skb->sk): no NULL check on ret",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid mem access 'tcp_sock_or_null'",
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): tp->bytes_acked",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_tcp_sock(skb->sk): beyond last field",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = REJECT,
+ .errstr = "invalid tcp_sock access",
+},
+{
+ "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .result = ACCEPT,
+},
+{
+ "bpf_sk_release(skb->sk)",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "bpf_sk_release(bpf_tcp_sock(skb->sk))",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "reference has not been acquired before",
+},
+{
+ "sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 11 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "sk_storage_get(map, skb->sk, 1, 1): value == 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 11 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "R3 type=inv expected=fp",
+},
+{
+ "sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 14 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+},
+{
+ "sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 14 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid indirect read from stack",
+},
+{
+ "bpf_map_lookup_elem(smap, &key)",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_sk_storage_map = { 3 },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
+},
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
new file mode 100644
index 000000000000..45d43bf82f26
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -0,0 +1,76 @@
+{
+ "check valid spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* fill it back into R2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
+ /* should be able to access R0 = *(R2 + 8) */
+ /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check valid spill/fill, skb mark",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+},
+{
+ "check corrupted spill/fill",
+ .insns = {
+ /* spill R1(ctx) into stack */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ /* mess up with R1 pointer on stack */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
+ /* Filling it back into R0 is fine for priv.
+ * R0 now becomes SCALAR_VALUE.
+ */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ /* Load from R0 should fail. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .errstr = "R0 invalid mem access 'inv",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "check corrupted spill/fill, LSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
+{
+ "check corrupted spill/fill, MSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+},
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c
new file mode 100644
index 000000000000..781621facae4
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/spin_lock.c
@@ -0,0 +1,333 @@
+{
+ "spin_lock: test1 success",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test2 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test3 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test4 direct ld/st",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 3),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "cannot be accessed directly",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "spin_lock: test5 call within a locked region",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test6 missing unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test7 unlock without lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "without taking a lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test8 double lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "calls are not allowed",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test9 different lock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3, 11 },
+ .result = REJECT,
+ .errstr = "unlock of different lock",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test10 lock in subprog without unlock",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
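+ /* BPF_PSEUDO_CALL (src_reg 1) into the subprog that starts at the
+ * bpf_spin_lock() call below: the subprog takes the lock and returns
+ * without releasing it.
+ */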
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 3 },
+ .result = REJECT,
+ .errstr = "unlock is missing",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "spin_lock: test11 ld_abs under lock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock),
+ BPF_LD_ABS(BPF_B, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_spin_lock = { 4 },
+ .result = REJECT,
+ .errstr = "inside bpf_spin_lock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c
new file mode 100644
index 000000000000..7276620ef242
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c
@@ -0,0 +1,317 @@
+{
+ "PTR_TO_STACK store/load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0xfaceb00c,
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
+},
+{
+ "PTR_TO_STACK store/load - bad alignment on reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds low",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=-79992 size=8",
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+},
+{
+ "PTR_TO_STACK store/load - out of bounds high",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off=0 size=8",
+},
+{
+ "PTR_TO_STACK check high 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check high 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=0 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check high 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check high 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK check low 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK check low 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "invalid stack off=-513 size=1",
+ .result = REJECT,
+},
+{
+ "PTR_TO_STACK check low 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "math between fp pointer",
+},
+{
+ "PTR_TO_STACK check low 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 6",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack off",
+},
+{
+ "PTR_TO_STACK check low 7",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .errstr = "fp pointer offset",
+},
+{
+ "PTR_TO_STACK mixed reg/k, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
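+ /* R1 now points to fp-6: fp-3 from the immediate add, -3 more via R2.
+ * The byte written next is read back below through R5 at fp-6.
+ */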
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "PTR_TO_STACK mixed reg/k, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -3,
+},
+{
+ "PTR_TO_STACK reg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_2, -3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid stack off=0 size=1",
+ .result = ACCEPT,
+ .retval = 42,
+},
+{
+ "stack pointer arithmetic",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+ BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c
new file mode 100644
index 000000000000..4c4133c80440
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/subreg.c
@@ -0,0 +1,533 @@
+/* This file contains sub-register zero extension checks for insns defining
+ * sub-registers, meaning:
+ * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
+ * forms (BPF_END) could define sub-registers.
+ * - Narrow direct loads, BPF_B/H/W | BPF_LDX.
+ * - BPF_LD is not exposed to JIT back-ends, so no need for testing.
+ *
+ * "get_prandom_u32" is used to initialize low 32-bit of some registers to
+ * prevent potential optimizations done by verifier or JIT back-ends which could
+ * optimize register back into constant when range info shows one register is a
+ * constant.
+ */
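+
+/* Common skeleton shared by the tests below (informal sketch, not an exact
+ * transcription of any single test):
+ *
+ *   r0 = get_prandom_u32()        low 32 bits hold an unknown value
+ *   r0 |= 0x1000000000            poison the upper 32 bits
+ *   <32-bit insn under test>      expected to zero-extend its destination
+ *   r0 >>= 32                     expose the upper half
+ *   exit                          retval must be 0 for the test to ACCEPT
+ */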
+{
+ "add32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "add32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ /* An insn could have no effect on the low 32 bits, for example:
+ * a = a + 0
+ * a = a | 0
+ * a = a & -1
+ * But it should still zero the high 32 bits.
+ */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "sub32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+ BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "sub32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mul32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mul32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "div32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+ BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "div32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "or32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "or32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "and32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+ BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "and32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "lsh32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "lsh32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "rsh32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "rsh32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "neg32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mod32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+ BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mod32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "xor32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "xor32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mov32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "mov32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "arsh32 imm zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "end16 (to_le) reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "end32 (to_le) reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "end16 (to_be) reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "end32 (to_be) reg zero extend check",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+ BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ldx_b zero extend check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+ BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ldx_h zero extend check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+ BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "ldx_w zero extend check",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+ BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+ BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
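All of the "zero extend check" tests above share a single pattern: the upper 32 bits of a register are dirtied (for example by ORing in 0x1000000000ULL, or by shifting a random value up by 32), one 32-bit ALU, endian or load instruction is executed on it, and the register is then shifted right by 32. Because a 32-bit subregister write is defined to zero-extend into the upper half, the program has to return 0. A minimal user-space C sketch of that property (illustrative only, not taken from the patch; the function name alu32_lsh is invented for the example):

#include <assert.h>
#include <stdint.h>

/*
 * Model of a 32-bit (BPF_ALU) operation on a 64-bit BPF register: the
 * result lands in the low 32 bits and the upper 32 bits are cleared,
 * so a following right shift by 32 always yields 0.
 */
static uint64_t alu32_lsh(uint64_t reg, unsigned int shift)
{
	uint32_t lo = (uint32_t)reg;	/* 32-bit subregister view */

	lo <<= shift;			/* the 32-bit operation itself */
	return (uint64_t)lo;		/* zero-extended back to 64 bits */
}

int main(void)
{
	uint64_t r0 = 0x1000000000ULL | 0xdeadbeefULL; /* dirty upper half */

	r0 = alu32_lsh(r0, 1);
	assert((r0 >> 32) == 0);	/* what ".retval = 0" verifies */
	return 0;
}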
diff --git a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c
new file mode 100644
index 000000000000..987a5871ff1d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/uninit.c
@@ -0,0 +1,39 @@
+{
+ "read uninitialized register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R2 !read_ok",
+ .result = REJECT,
+},
+{
+ "read invalid register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R15 is invalid",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .result = REJECT,
+},
+{
+ "program doesn't init R0 before exit in all branches",
+ .insns = {
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R0 !read_ok",
+ .errstr_unpriv = "R1 pointer comparison",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c
new file mode 100644
index 000000000000..91bb77c24a2e
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/unpriv.c
@@ -0,0 +1,522 @@
+{
+ "unpriv: return pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .retval = POINTER_VALUE,
+},
+{
+ "unpriv: add const to pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: add pointer to pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 pointer += pointer",
+},
+{
+ "unpriv: neg pointer",
+ .insns = {
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer arithmetic",
+},
+{
+ "unpriv: cmp pointer with const",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 pointer comparison",
+},
+{
+ "unpriv: cmp pointer with pointer",
+ .insns = {
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 pointer comparison",
+},
+{
+ "unpriv: check that printk is disallowed",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "unknown func bpf_trace_printk#6",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "unpriv: pass pointer to helper function",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R4 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: indirectly pass pointer on stack to helper function",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr = "invalid indirect read from stack off -8+0 size 8",
+ .result = REJECT,
+},
+{
+ "unpriv: mangle pointer on stack 1",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: mangle pointer on stack 2",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: read pointer from stack in small chunks",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid size",
+ .result = REJECT,
+},
+{
+ "unpriv: write pointer into ctx",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 leaks addr",
+ .result_unpriv = REJECT,
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill of ctx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+},
+{
+ "unpriv: spill/fill of ctx 2",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 3",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=fp expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of ctx 4",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=inv expected=ctx",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - ctx and sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ /* if (sk) bpf_sk_release(sk) */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=ctx expected=sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - leak sock",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb == NULL) *target = sock; */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* else *target = skb; */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* struct __sk_buff *skb = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* skb->mark = 42; */
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+	/* .errstr = "same insn cannot be used with different pointers", */
+ .errstr = "Unreleased reference",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (read)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers stx - sock and ctx (write)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ /* struct bpf_sock *sock = bpf_sock_lookup(...); */
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ /* u64 foo; */
+ /* void *target = &foo; */
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ /* if (skb) *target = skb */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ /* else *target = sock */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ /* struct bpf_sock *sk = *target; */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ /* if (sk) sk->mark = 42; bpf_sk_release(sk); */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_3, 42),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
+ offsetof(struct bpf_sock, mark)),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+	/* .errstr = "same insn cannot be used with different pointers", */
+ .errstr = "cannot write into sock",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "unpriv: spill/fill of different pointers ldx",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+ -(__s32)offsetof(struct bpf_perf_event_data,
+ sample_period) - 8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+ offsetof(struct bpf_perf_event_data, sample_period)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "same insn cannot be used with different pointers",
+ .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+},
+{
+ "unpriv: write pointer into map elem value",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "alu32: mov u32 const",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_7, 0),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "unpriv: partial copy of pointer",
+ .insns = {
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 partial copy",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: pass pointer to tail_call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+ BPF_LD_MAP_FD(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_prog1 = { 1 },
+ .errstr_unpriv = "R3 leaks addr into helper",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp map pointer with zero",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 1 },
+ .errstr_unpriv = "R1 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: write into frame pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: spill/fill frame pointer",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "frame pointer is read only",
+ .result = REJECT,
+},
+{
+ "unpriv: cmp of frame pointer",
+ .insns = {
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R10 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: adding of fp",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
+ "unpriv: cmp of stack pointer",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "R2 pointer comparison",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
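The .result_unpriv/.errstr_unpriv expectations in the unpriv tests above only apply when the loader lacks privileges and unprivileged BPF has not been switched off system-wide; if it has, unprivileged loads are refused outright and these cases cannot be exercised. A small user-space sketch for inspecting that switch, which the kernel exposes as the kernel.unprivileged_bpf_disabled sysctl (illustrative only, not taken from the patch):

#include <stdio.h>

/*
 * Read kernel.unprivileged_bpf_disabled: 0 means unprivileged users may
 * load BPF programs (subject to per-program-type rules), non-zero means
 * unprivileged loading is rejected with EPERM.
 */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");
	int val = -1;

	if (!f) {
		perror("unprivileged_bpf_disabled");
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	printf("unprivileged bpf is %s\n",
	       val == 0 ? "enabled" : "disabled or unknown");
	return 0;
}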
diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c
new file mode 100644
index 000000000000..0e42592b1218
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value.c
@@ -0,0 +1,104 @@
+{
+ "map element value store of cleared call register",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R1 !read_ok",
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value with unaligned store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+ BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value with unaligned load",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
new file mode 100644
index 000000000000..7135e8021b81
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_adj_spill.c
@@ -0,0 +1,43 @@
+{
+ "map element value is preserved across register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
+{
+ "map element value or null is marked on register spilling",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 leaks addr",
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
new file mode 100644
index 000000000000..7f6c232cd842
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -0,0 +1,94 @@
+{
+ "map element value illegal alu op, 1",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 bitwise operator &= on pointer",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 2",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 32-bit pointer arithmetic prohibited",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 3",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 pointer arithmetic with /= operator",
+ .result = REJECT,
+},
+{
+ "map element value illegal alu op, 4",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "R0 pointer arithmetic prohibited",
+ .errstr = "invalid mem access 'inv'",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map element value illegal alu op, 5",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_3, 4096),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 invalid mem access 'inv'",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
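The value_illegal_alu tests spell out, at the instruction level, what happens when anything other than a plain add or subtract is applied to a map value pointer. A rough restricted-C counterpart of test 1 is sketched below; the map definition, section and function names are invented for the example, and it assumes libbpf's bpf_helpers.h conventions. Masking the pointer for alignment typically compiles to a BPF_AND on the pointer register, so the verifier rejects the program with "R0 bitwise operator &= on pointer":

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} vals SEC(".maps");

SEC("socket")
int illegal_ptr_alu(void *ctx)
{
	__u32 key = 0;
	__u64 *p = bpf_map_lookup_elem(&vals, &key);

	if (!p)
		return 0;
	/*
	 * Masking the pointer becomes a BPF_AND on a map value pointer;
	 * the verifier refuses this, mirroring "illegal alu op, 1" above.
	 */
	p = (__u64 *)((unsigned long)p & ~7UL);
	*p = 22;
	return 0;
}

char _license[] SEC("license") = "GPL";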
diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c
new file mode 100644
index 000000000000..860d4a71cd83
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_or_null.c
@@ -0,0 +1,152 @@
+{
+ "multiple registers share map_lookup_elem result",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "alu ops on ptr_to_map_value_or_null, 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .errstr = "R4 pointer arithmetic on map_value_or_null",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid memory access with multiple map_lookup_elem calls",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = REJECT,
+ .errstr = "R4 !read_ok",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "valid indirect map_lookup_elem access with 2nd lookup in branch",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_2, 10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+},
+{
+ "invalid map access from else condition",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 3 },
+ .errstr = "R0 unbounded memory access",
+ .result = REJECT,
+ .errstr_unpriv = "R0 leaks addr",
+ .result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
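The value_or_null tests all build on the fact that bpf_map_lookup_elem() returns either a pointer into the map value or NULL, and that the verifier tracks the result as "map value or NULL" until a NULL check is seen; dereferencing it, or doing pointer arithmetic on it, before that check is rejected. The canonical accepted shape in restricted C looks roughly like the sketch below (map, section and function names invented for the example, assuming libbpf's bpf_helpers.h conventions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u64);
	__type(value, __u64);
} ht SEC(".maps");

SEC("socket")
int lookup_then_check(void *ctx)
{
	__u64 key = 10;
	__u64 *val = bpf_map_lookup_elem(&ht, &key);

	/*
	 * The result is "map value or NULL" until this check; any use of
	 * the pointer before it would make the verifier reject the program.
	 */
	if (!val)
		return 0;

	*val = 0;	/* safe: val is now a plain map value pointer */
	return 0;
}

char _license[] SEC("license") = "GPL";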
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
new file mode 100644
index 000000000000..c3de1a2c9dc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -0,0 +1,838 @@
+{
+ "map access: known scalar += value_ptr from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R1 tried to add from different maps",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar from different maps",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 min value is outside of the array range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr from different maps, but same value properties",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 5 },
+ .fixup_map_array_48b = { 8 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: mixing value pointer and scalar, 1",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+ .retval = 0,
+},
+{
+ "map access: mixing value pointer and scalar, 2",
+ .insns = {
+ // load map value pointer into r0 and r2
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ // load some number from the map into r1
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ // branch A
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ // branch B
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ // common instruction
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ // depending on r1, branch:
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ // branch A
+ BPF_JMP_A(4),
+ // branch B
+ BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
+ // verifier follows fall-through
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ // fake-dead code; targeted from branch A to
+ // prevent dead code sanitization
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R2 tried to add from different maps or paths",
+ .retval = 0,
+},
+{
+ "sanitation: alu with different scalars 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
+ BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100000),
+ BPF_JMP_A(2),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0x100001),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = 0x100000,
+},
+{
+ "sanitation: alu with different scalars 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_EMIT_CALL(BPF_FUNC_map_delete_elem),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 1 },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "sanitation: alu with different scalars 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, EINVAL),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -EINVAL * 2,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, upper oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 48),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= known scalar, lower oob arith, test 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 47),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: known scalar += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+},
+{
+ "map access: value_ptr += known scalar, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_IMM(BPF_REG_1, 5),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr += known scalar, 5",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: value_ptr += known scalar, 6",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+},
+{
+ "map access: unknown scalar += value_ptr, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: unknown scalar += value_ptr, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: unknown scalar += value_ptr, 4",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_1, 19),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 max value is outside of the array range",
+ .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 0xabcdef12,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "map access: value_ptr += unknown scalar, 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "map access: value_ptr += value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 pointer += pointer prohibited",
+},
+{
+ "map access: known scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= known scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_IMM(BPF_REG_1, 4),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the array range",
+},
+{
+ "map access: value_ptr -= known scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_1, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: unknown scalar -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R1 tried to subtract pointer from scalar",
+},
+{
+ "map access: value_ptr -= unknown scalar",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 min value is negative",
+},
+{
+ "map access: value_ptr -= unknown scalar, 2",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
+ .retval = 1,
+},
+{
+ "map access: value_ptr -= value_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 3 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'inv'",
+ .errstr_unpriv = "R0 pointer -= pointer prohibited",
+},
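The value_ptr cases above all follow one shape: load an unknown byte out of the map value, mask it so the verifier can prove an upper bound, then use it as an offset into the 48-byte value. A minimal restricted-C sketch of that same pattern for clang -target bpf; the map, section and identifier names here are assumptions for illustration, not taken from the selftests:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val48 {
	char data[48];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct val48);
} array_48b SEC(".maps");

SEC("socket")
int bounded_value_access(struct __sk_buff *skb)
{
	__u32 key = 0, idx;
	struct val48 *val = bpf_map_lookup_elem(&array_48b, &key);

	if (!val)
		return 0;
	idx = val->data[0] & 0xf;	/* unknown byte, now provably 0..15 */
	return val->data[idx];		/* stays inside the 48-byte value */
}

char _license[] SEC("license") = "GPL";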
diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c
new file mode 100644
index 000000000000..8504ac937809
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/var_off.c
@@ -0,0 +1,248 @@
+{
+ "variable-offset ctx access",
+ .insns = {
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ /* add it to skb. We now have either &skb->len or
+ * &skb->pkt_type, but we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable ctx access var_off=(0x0; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "variable-offset stack access",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access, unbounded",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_3, 28),
+ /* Fill the top 16 bytes of the stack. */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, offsetof(struct bpf_sock_ops,
+ bytes_received)),
+ /* Check the lower bound but don't check the upper one. */
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_4, 0, 4),
+ /* Point the lower bound to initialized stack. Offset is now in range
+ * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.
+ */
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_5, 8),
+ /* Dereference it indirectly. */
+ BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R4 unbounded indirect variable offset stack access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "indirect variable-offset stack access, max out of bound",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+ /* add it to fp. We now have either fp-4 or fp-8, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "R2 max value is outside of stack bound",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access, min out of bound",
+ .insns = {
+ /* Fill the top 8 bytes of the stack */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 516),
+ /* add it to fp. We now have either fp-516 or fp-512, but
+ * we don't know which
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* dereference it indirectly */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "R2 min value is outside of stack bound",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access, max_off+size > max_initialized",
+ .insns = {
+ /* Fill only the second from top 8 bytes of the stack. */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ /* Get an unknown value. */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned. */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+ /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
+ * which. fp-12 size 8 is partially uninitialized stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* Dereference it indirectly. */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "invalid indirect read from stack var_off",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access, min_off < min_initialized",
+ .insns = {
+ /* Fill only the top 8 bytes of the stack. */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned. */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+ /* Add it to fp. We now have either fp-12 or fp-16, but we don't know
+ * which. fp-16 size 8 is partially uninitialized stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* Dereference it indirectly. */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 5 },
+ .errstr = "invalid indirect read from stack var_off",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
+{
+ "indirect variable-offset stack access, priv vs unpriv",
+ .insns = {
+ /* Fill the top 16 bytes of the stack. */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value. */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned. */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+ /* Add it to fp. We now have either fp-12 or fp-16, we don't know
+ * which, but either way it points to initialized stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* Dereference it indirectly. */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .errstr_unpriv = "R2 stack pointer arithmetic goes out of range, prohibited for !root",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
+ "indirect variable-offset stack access, uninitialized",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 6),
+ BPF_MOV64_IMM(BPF_REG_3, 28),
+ /* Fill the top 16 bytes of the stack. */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value. */
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned. */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
+ /* Add it to fp. We now have either fp-12 or fp-16, we don't know
+ * which, but either way it points to initialized stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
+ BPF_MOV64_IMM(BPF_REG_5, 8),
+ /* Dereference it indirectly. */
+ BPF_EMIT_CALL(BPF_FUNC_getsockopt),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid indirect read from stack var_off",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCK_OPS,
+},
+{
+ "indirect variable-offset stack access, ok",
+ .insns = {
+ /* Fill the top 16 bytes of the stack. */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ /* Get an unknown value. */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ /* Make it small and 4-byte aligned. */
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
+ /* Add it to fp. We now have either fp-12 or fp-16, we don't know
+ * which, but either way it points to initialized stack.
+ */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+ /* Dereference it indirectly. */
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 6 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_IN,
+},
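The accepted case above initializes fp-16..fp-1, bounds the unknown scalar to {0, 4} and hands the resulting fp-16/fp-12 pointer to a helper. A rough restricted-C equivalent, under the assumption that clang keeps the buffer on the stack and that the usual libbpf map/section names apply (none of these identifiers come from the patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u64);
	__type(value, __u64);
} map_hash_8b SEC(".maps");

SEC("lwt_in")
int var_off_ok(struct __sk_buff *skb)
{
	__u64 buf[2] = {};			/* fp-16..fp-1 initialized */
	__u32 off = skb->len & 4;		/* var_off: 0 or 4 */
	void *key = (char *)buf + off;		/* fp-16 or fp-12, both valid */

	bpf_map_lookup_elem(&map_hash_8b, key);
	return 0;
}

char _license[] SEC("license") = "GPL";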
diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c
new file mode 100644
index 000000000000..c5de2e62cc8b
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xadd.c
@@ -0,0 +1,97 @@
+{
+ "xadd/w check unaligned stack",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "misaligned stack access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned map",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "misaligned value access off",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
+ "xadd/w check unaligned pkt",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 99),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+ BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "BPF_XADD stores into R2 pkt is not allowed",
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "xadd/w check whether src/dst got mangled, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
+{
+ "xadd/w check whether src/dst got mangled, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+},
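BPF_STX_XADD is what __sync_fetch_and_add() on a map value or stack slot compiles down to, and the cases above check that it is refused on misaligned offsets and packet pointers and that it leaves its source/destination registers untouched. A hedged restricted-C sketch of the aligned, accepted usage (map and section names are assumptions):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("classifier")
int xadd_counter(struct __sk_buff *skb)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	if (!val)
		return 0;
	__sync_fetch_and_add(val, 1);	/* 8-byte aligned -> BPF_XADD | BPF_DW */
	return 0;
}

char _license[] SEC("license") = "GPL";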
diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c
new file mode 100644
index 000000000000..5ac390508139
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp.c
@@ -0,0 +1,14 @@
+{
+ "XDP, using ifindex from netdev",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
new file mode 100644
index 000000000000..bfb97383e6b5
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -0,0 +1,900 @@
+{
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_end mangling, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R3 pointer arithmetic on pkt_end",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
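All of the cases in this file permute a single pattern: derive a pointer 8 bytes past data (or data_meta), compare it against data_end (or data), and only read behind it on the branch where the comparison proves the access in bounds. The canonical restricted-C form of the accepted pattern, with conventional libbpf names assumed:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int pkt_bounds_ok(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 8 > data_end)	/* pkt_data' > pkt_end: bail out */
		return XDP_DROP;
	/* Verifier now knows [data, data + 8) lies inside the packet. */
	return *(volatile __u64 *)data ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";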
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index 901b85ea6a59..3266cc9293fe 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
*
- * Licensed under the terms of the GNU GPL License version 2
- *
* Selftests for breakpoints (and more generally the do_debug() path) in x86.
*/
@@ -21,6 +20,8 @@
#include "../kselftest.h"
+#define COUNT_ISN_BPS 4
+#define COUNT_WPS 4
/* Breakpoint access modes */
enum {
@@ -220,7 +221,7 @@ static void trigger_tests(void)
if (!local && !global)
continue;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_ISN_BPS; i++) {
dummy_funcs[i]();
check_trapped();
}
@@ -292,7 +293,7 @@ static void launch_instruction_breakpoints(char *buf, int local, int global)
{
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_ISN_BPS; i++) {
set_breakpoint_addr(dummy_funcs[i], i);
toggle_breakpoint(i, BP_X, 1, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
@@ -314,7 +315,7 @@ static void launch_watchpoints(char *buf, int mode, int len,
else
mode_str = "read";
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < COUNT_WPS; i++) {
set_breakpoint_addr(&dummy_var[i], i);
toggle_breakpoint(i, mode, len, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
@@ -330,8 +331,15 @@ static void launch_watchpoints(char *buf, int mode, int len,
static void launch_tests(void)
{
char buf[1024];
+ unsigned int tests = 0;
int len, local, global, i;
+ tests += 3 * COUNT_ISN_BPS;
+ tests += sizeof(long) / 2 * 3 * COUNT_WPS;
+ tests += sizeof(long) / 2 * 3 * COUNT_WPS;
+ tests += 2;
+ ksft_set_plan(tests);
+
/* Instruction breakpoints */
for (local = 0; local < 2; local++) {
for (global = 0; global < 2; global++) {
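For reference, on a 64-bit build (sizeof(long) == 8) the plan computed above works out to 3 * 4 + 4 * 3 * 4 + 4 * 3 * 4 + 2 = 110 tests; a 32-bit build, where sizeof(long) / 2 is 2, plans 62.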
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index 2d95e5adde72..58ed5eeab709 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -1,20 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Google, Inc.
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Original Code by Pavel Labath <labath@google.com>
*
* Code modified by Pratyush Anand <panand@redhat.com>
* for testing different byte select for each access size.
- *
*/
#define _GNU_SOURCE
@@ -118,7 +109,7 @@ static bool set_watchpoint(pid_t pid, int size, int wp)
return false;
}
-static bool run_test(int wr_size, int wp_size, int wr, int wp)
+static bool arun_test(int wr_size, int wp_size, int wr, int wp)
{
int status;
siginfo_t siginfo;
@@ -214,6 +205,7 @@ int main(int argc, char **argv)
bool result;
ksft_print_header();
+ ksft_set_plan(213);
act.sa_handler = sigalrm;
sigemptyset(&act.sa_mask);
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index f82dcc1f8841..b3ead29c6089 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#define _GNU_SOURCE
@@ -173,6 +164,7 @@ int main(int argc, char **argv)
int opt;
bool do_suspend = true;
bool succeeded = true;
+ unsigned int tests = 0;
cpu_set_t available_cpus;
int err;
int cpu;
@@ -191,6 +183,13 @@ int main(int argc, char **argv)
}
}
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+ if (!CPU_ISSET(cpu, &available_cpus))
+ continue;
+ tests++;
+ }
+ ksft_set_plan(tests);
+
if (do_suspend)
suspend();
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 3ab39a61b95b..df0ef02b4036 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -430,8 +430,6 @@ int main(int argc, char **argv)
{
char *tmp1, *tmp2, *our_path;
- ksft_print_header();
-
/* Find our path */
tmp1 = strdup(argv[0]);
if (!tmp1)
@@ -445,6 +443,8 @@ int main(int argc, char **argv)
mpid = getpid();
if (fork_wait()) {
+ ksft_print_header();
+ ksft_set_plan(12);
ksft_print_msg("[RUN]\t+++ Tests with uid == 0 +++\n");
return do_tests(0, our_path);
}
@@ -452,6 +452,8 @@ int main(int argc, char **argv)
ksft_print_msg("==================================================\n");
if (fork_wait()) {
+ ksft_print_header();
+ ksft_set_plan(9);
ksft_print_msg("[RUN]\t+++ Tests with uid != 0 +++\n");
return do_tests(1, our_path);
}
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index adacda50a4b2..7f9835624793 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1,2 +1,3 @@
test_memcontrol
test_core
+test_freezer
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 23fbaa4a9630..8d369b6a2069 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -5,8 +5,10 @@ all:
TEST_GEN_PROGS = test_memcontrol
TEST_GEN_PROGS += test_core
+TEST_GEN_PROGS += test_freezer
include ../lib.mk
$(OUTPUT)/test_memcontrol: cgroup_util.c
$(OUTPUT)/test_core: cgroup_util.c
+$(OUTPUT)/test_freezer: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 14c9fe284806..4c223266299a 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -74,6 +74,16 @@ char *cg_name_indexed(const char *root, const char *name, int index)
return ret;
}
+char *cg_control(const char *cgroup, const char *control)
+{
+ size_t len = strlen(cgroup) + strlen(control) + 2;
+ char *ret = malloc(len);
+
+ snprintf(ret, len, "%s/%s", cgroup, control);
+
+ return ret;
+}
+
int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
{
char path[PATH_MAX];
@@ -196,7 +206,32 @@ int cg_create(const char *cgroup)
return mkdir(cgroup, 0644);
}
-static int cg_killall(const char *cgroup)
+int cg_wait_for_proc_count(const char *cgroup, int count)
+{
+ char buf[10 * PAGE_SIZE] = {0};
+ int attempts;
+ char *ptr;
+
+ for (attempts = 10; attempts >= 0; attempts--) {
+ int nr = 0;
+
+ if (cg_read(cgroup, "cgroup.procs", buf, sizeof(buf)))
+ break;
+
+ for (ptr = buf; *ptr; ptr++)
+ if (*ptr == '\n')
+ nr++;
+
+ if (nr >= count)
+ return 0;
+
+ usleep(100000);
+ }
+
+ return -1;
+}
+
+int cg_killall(const char *cgroup)
{
char buf[PAGE_SIZE];
char *ptr = buf;
@@ -227,9 +262,7 @@ int cg_destroy(const char *cgroup)
retry:
ret = rmdir(cgroup);
if (ret && errno == EBUSY) {
- ret = cg_killall(cgroup);
- if (ret)
- return ret;
+ cg_killall(cgroup);
usleep(100);
goto retry;
}
@@ -240,6 +273,14 @@ retry:
return ret;
}
+int cg_enter(const char *cgroup, int pid)
+{
+ char pidbuf[64];
+
+ snprintf(pidbuf, sizeof(pidbuf), "%d", pid);
+ return cg_write(cgroup, "cgroup.procs", pidbuf);
+}
+
int cg_enter_current(const char *cgroup)
{
char pidbuf[64];
@@ -369,3 +410,12 @@ int set_oom_adj_score(int pid, int score)
close(fd);
return 0;
}
+
+ssize_t proc_read_text(int pid, const char *item, char *buf, size_t size)
+{
+ char path[PATH_MAX];
+
+ snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
+
+ return read_text(path, buf, size);
+}
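
cgroup_util grows cg_control(), cg_enter(), cg_wait_for_proc_count(), cg_killall() and proc_read_text() here. A hedged usage sketch of the read-only helpers, assuming it is compiled together with cgroup_util.c inside the selftests tree (the show_self() wrapper is illustrative, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/limits.h>

#include "cgroup_util.h"

/* Build a control-file path for the unified root and read the calling
 * task's /proc/<pid>/stat through the new helpers. */
static int show_self(const char *root)
{
	char *events = cg_control(root, "cgroup.events");	/* "<root>/cgroup.events" */
	char stat[4096] = {0};

	if (!events)
		return -1;
	printf("events file: %s\n", events);
	free(events);

	if (proc_read_text(getpid(), "stat", stat, sizeof(stat)) < 0)
		return -1;
	printf("%s", stat);
	return 0;
}

int main(void)
{
	char root[PATH_MAX];

	if (cg_find_unified_root(root, sizeof(root)))
		return 1;	/* cgroup v2 is not mounted */

	return show_self(root) ? 1 : 0;
}
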
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 9ac8b7958f83..c72f28046bfa 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -18,6 +18,7 @@ static inline int values_close(long a, long b, int err)
extern int cg_find_unified_root(char *root, size_t len);
extern char *cg_name(const char *root, const char *name);
extern char *cg_name_indexed(const char *root, const char *name, int index);
+extern char *cg_control(const char *cgroup, const char *control);
extern int cg_create(const char *cgroup);
extern int cg_destroy(const char *cgroup);
extern int cg_read(const char *cgroup, const char *control,
@@ -32,6 +33,7 @@ extern int cg_write(const char *cgroup, const char *control, char *buf);
extern int cg_run(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg);
+extern int cg_enter(const char *cgroup, int pid);
extern int cg_enter_current(const char *cgroup);
extern int cg_run_nowait(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
@@ -41,3 +43,6 @@ extern int alloc_pagecache(int fd, size_t size);
extern int alloc_anon(const char *cgroup, void *arg);
extern int is_swap_enabled(void);
extern int set_oom_adj_score(int pid, int score);
+extern int cg_wait_for_proc_count(const char *cgroup, int count);
+extern int cg_killall(const char *cgroup);
+extern ssize_t proc_read_text(int pid, const char *item, char *buf, size_t size);
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index be59f9c34ea2..79053a4f4783 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo
char *parent = NULL, *child = NULL;
if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
- cg_read_strstr(root, "cgroup.subtree_control", "cpu")) {
+ cg_write(root, "cgroup.subtree_control", "+cpu")) {
ret = KSFT_SKIP;
goto cleanup;
}
@@ -376,6 +376,11 @@ int main(int argc, char *argv[])
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+ if (cg_write(root, "cgroup.subtree_control", "+memory"))
+ ksft_exit_skip("Failed to set memory controller\n");
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
new file mode 100644
index 000000000000..2bfddb6d6d3b
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -0,0 +1,851 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdbool.h>
+#include <linux/limits.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <sys/inotify.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "../kselftest.h"
+#include "cgroup_util.h"
+
+#define DEBUG
+#ifdef DEBUG
+#define debug(args...) fprintf(stderr, args)
+#else
+#define debug(args...)
+#endif
+
+/*
+ * Check if the cgroup is frozen by looking at the cgroup.events::frozen value.
+ */
+static int cg_check_frozen(const char *cgroup, bool frozen)
+{
+ if (frozen) {
+ if (cg_read_strstr(cgroup, "cgroup.events", "frozen 1") != 0) {
+ debug("Cgroup %s isn't frozen\n", cgroup);
+ return -1;
+ }
+ } else {
+ /*
+ * Check the cgroup.events::frozen value.
+ */
+ if (cg_read_strstr(cgroup, "cgroup.events", "frozen 0") != 0) {
+ debug("Cgroup %s is frozen\n", cgroup);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Freeze the given cgroup.
+ */
+static int cg_freeze_nowait(const char *cgroup, bool freeze)
+{
+ return cg_write(cgroup, "cgroup.freeze", freeze ? "1" : "0");
+}
+
+/*
+ * Prepare for waiting on cgroup.events file.
+ */
+static int cg_prepare_for_wait(const char *cgroup)
+{
+ int fd, ret = -1;
+
+ fd = inotify_init1(0);
+ if (fd == -1) {
+ debug("Error: inotify_init1() failed\n");
+ return fd;
+ }
+
+ ret = inotify_add_watch(fd, cg_control(cgroup, "cgroup.events"),
+ IN_MODIFY);
+ if (ret == -1) {
+ debug("Error: inotify_add_watch() failed\n");
+ close(fd);
+ }
+
+ return fd;
+}
+
+/*
+ * Wait for an event. If there are no events for 10 seconds,
+ * treat this as an error.
+ */
+static int cg_wait_for(int fd)
+{
+ int ret = -1;
+ struct pollfd fds = {
+ .fd = fd,
+ .events = POLLIN,
+ };
+
+ while (true) {
+ ret = poll(&fds, 1, 10000);
+
+ if (ret == -1) {
+ if (errno == EINTR)
+ continue;
+ debug("Error: poll() failed\n");
+ break;
+ }
+
+ if (ret > 0 && fds.revents & POLLIN) {
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Attach a task to the given cgroup and wait for a cgroup frozen event.
+ * All transient events (e.g. populated) are ignored.
+ */
+static int cg_enter_and_wait_for_frozen(const char *cgroup, int pid,
+ bool frozen)
+{
+ int fd, ret = -1;
+ int attempts;
+
+ fd = cg_prepare_for_wait(cgroup);
+ if (fd < 0)
+ return fd;
+
+ ret = cg_enter(cgroup, pid);
+ if (ret)
+ goto out;
+
+ for (attempts = 0; attempts < 10; attempts++) {
+ ret = cg_wait_for(fd);
+ if (ret)
+ break;
+
+ ret = cg_check_frozen(cgroup, frozen);
+ if (ret)
+ continue;
+ }
+
+out:
+ close(fd);
+ return ret;
+}
+
+/*
+ * Freeze the given cgroup and wait for the inotify signal.
+ * If there are no events in 10 seconds, treat this as an error.
+ * Then check that the cgroup is in the desired state.
+ */
+static int cg_freeze_wait(const char *cgroup, bool freeze)
+{
+ int fd, ret = -1;
+
+ fd = cg_prepare_for_wait(cgroup);
+ if (fd < 0)
+ return fd;
+
+ ret = cg_freeze_nowait(cgroup, freeze);
+ if (ret) {
+ debug("Error: cg_freeze_nowait() failed\n");
+ goto out;
+ }
+
+ ret = cg_wait_for(fd);
+ if (ret)
+ goto out;
+
+ ret = cg_check_frozen(cgroup, freeze);
+out:
+ close(fd);
+ return ret;
+}
+
+/*
+ * A simple process running in a sleep loop until being
+ * re-parented.
+ */
+static int child_fn(const char *cgroup, void *arg)
+{
+ int ppid = getppid();
+
+ while (getppid() == ppid)
+ usleep(1000);
+
+ return getppid() == ppid;
+}
+
+/*
+ * A simple test for the cgroup freezer: it populates the cgroup with 100
+ * running processes and freezes it, then unfreezes it. Finally, it kills
+ * all processes and destroys the cgroup.
+ */
+static int test_cgfreezer_simple(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ int i;
+
+ cgroup = cg_name(root, "cg_test_simple");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ for (i = 0; i < 100; i++)
+ cg_run_nowait(cgroup, child_fn, NULL);
+
+ if (cg_wait_for_proc_count(cgroup, 100))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup, false))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, false))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * The test creates the following hierarchy:
+ *          A
+ *       / / \ \
+ *      B  E  I K
+ *     /\  |
+ *    C  D F
+ *         |
+ *         G
+ *         |
+ *         H
+ *
+ * with one process in each of C and H, and 3 processes in K.
+ * Then it tries to freeze and unfreeze the whole tree.
+ */
+static int test_cgfreezer_tree(const char *root)
+{
+ char *cgroup[10] = {0};
+ int ret = KSFT_FAIL;
+ int i;
+
+ cgroup[0] = cg_name(root, "cg_test_tree_A");
+ if (!cgroup[0])
+ goto cleanup;
+
+ cgroup[1] = cg_name(cgroup[0], "B");
+ if (!cgroup[1])
+ goto cleanup;
+
+ cgroup[2] = cg_name(cgroup[1], "C");
+ if (!cgroup[2])
+ goto cleanup;
+
+ cgroup[3] = cg_name(cgroup[1], "D");
+ if (!cgroup[3])
+ goto cleanup;
+
+ cgroup[4] = cg_name(cgroup[0], "E");
+ if (!cgroup[4])
+ goto cleanup;
+
+ cgroup[5] = cg_name(cgroup[4], "F");
+ if (!cgroup[5])
+ goto cleanup;
+
+ cgroup[6] = cg_name(cgroup[5], "G");
+ if (!cgroup[6])
+ goto cleanup;
+
+ cgroup[7] = cg_name(cgroup[6], "H");
+ if (!cgroup[7])
+ goto cleanup;
+
+ cgroup[8] = cg_name(cgroup[0], "I");
+ if (!cgroup[8])
+ goto cleanup;
+
+ cgroup[9] = cg_name(cgroup[0], "K");
+ if (!cgroup[9])
+ goto cleanup;
+
+ for (i = 0; i < 10; i++)
+ if (cg_create(cgroup[i]))
+ goto cleanup;
+
+ cg_run_nowait(cgroup[2], child_fn, NULL);
+ cg_run_nowait(cgroup[7], child_fn, NULL);
+ cg_run_nowait(cgroup[9], child_fn, NULL);
+ cg_run_nowait(cgroup[9], child_fn, NULL);
+ cg_run_nowait(cgroup[9], child_fn, NULL);
+
+ /*
+ * Wait until all child processes have entered
+ * the corresponding cgroups.
+ */
+
+ if (cg_wait_for_proc_count(cgroup[2], 1) ||
+ cg_wait_for_proc_count(cgroup[7], 1) ||
+ cg_wait_for_proc_count(cgroup[9], 3))
+ goto cleanup;
+
+ /*
+ * Freeze B.
+ */
+ if (cg_freeze_wait(cgroup[1], true))
+ goto cleanup;
+
+ /*
+ * Freeze F.
+ */
+ if (cg_freeze_wait(cgroup[5], true))
+ goto cleanup;
+
+ /*
+ * Freeze G.
+ */
+ if (cg_freeze_wait(cgroup[6], true))
+ goto cleanup;
+
+ /*
+ * Check that A and E are not frozen.
+ */
+ if (cg_check_frozen(cgroup[0], false))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[4], false))
+ goto cleanup;
+
+ /*
+ * Freeze A. Check that A, B and E are frozen.
+ */
+ if (cg_freeze_wait(cgroup[0], true))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[1], true))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[4], true))
+ goto cleanup;
+
+ /*
+ * Unfreeze B, F and G
+ */
+ if (cg_freeze_nowait(cgroup[1], false))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[5], false))
+ goto cleanup;
+
+ if (cg_freeze_nowait(cgroup[6], false))
+ goto cleanup;
+
+ /*
+ * Check that C and H are still frozen.
+ */
+ if (cg_check_frozen(cgroup[2], true))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[7], true))
+ goto cleanup;
+
+ /*
+ * Unfreeze A. Check that A, C and K are not frozen.
+ */
+ if (cg_freeze_wait(cgroup[0], false))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[2], false))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[9], false))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ for (i = 9; i >= 0 && cgroup[i]; i--) {
+ cg_destroy(cgroup[i]);
+ free(cgroup[i]);
+ }
+
+ return ret;
+}
+
+/*
+ * A fork bomb emulator.
+ */
+static int forkbomb_fn(const char *cgroup, void *arg)
+{
+ int ppid;
+
+ fork();
+ fork();
+
+ ppid = getppid();
+
+ while (getppid() == ppid)
+ usleep(1000);
+
+ return getppid() == ppid;
+}
+
+/*
+ * The test runs a fork bomb in a cgroup and tries to freeze it.
+ * Then it kills all processes and checks that the cgroup isn't populated
+ * anymore.
+ */
+static int test_cgfreezer_forkbomb(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+
+ cgroup = cg_name(root, "cg_forkbomb_test");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ cg_run_nowait(cgroup, forkbomb_fn, NULL);
+
+ usleep(100000);
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ if (cg_killall(cgroup))
+ goto cleanup;
+
+ if (cg_wait_for_proc_count(cgroup, 0))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * The test creates two nested cgroups, freezes the parent
+ * and removes the child. Then it checks that the parent cgroup
+ * remains frozen and that a new child can be created
+ * without unfreezing it. The new child is frozen too.
+ */
+static int test_cgfreezer_rmdir(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child = NULL;
+
+ parent = cg_name(root, "cg_test_rmdir_A");
+ if (!parent)
+ goto cleanup;
+
+ child = cg_name(parent, "cg_test_rmdir_B");
+ if (!child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_freeze_wait(parent, true))
+ goto cleanup;
+
+ if (cg_destroy(child))
+ goto cleanup;
+
+ if (cg_check_frozen(parent, true))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_check_frozen(child, true))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ free(child);
+ if (parent)
+ cg_destroy(parent);
+ free(parent);
+ return ret;
+}
+
+/*
+ * The test creates two cgroups: A and B, runs a process in A
+ * and performs several migrations:
+ * 1) A (running) -> B (frozen)
+ * 2) B (frozen) -> A (running)
+ * 3) A (frozen) -> B (frozen)
+ *
+ * On each step it checks the actual state of both cgroups.
+ */
+static int test_cgfreezer_migrate(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup[2] = {0};
+ int pid;
+
+ cgroup[0] = cg_name(root, "cg_test_migrate_A");
+ if (!cgroup[0])
+ goto cleanup;
+
+ cgroup[1] = cg_name(root, "cg_test_migrate_B");
+ if (!cgroup[1])
+ goto cleanup;
+
+ if (cg_create(cgroup[0]))
+ goto cleanup;
+
+ if (cg_create(cgroup[1]))
+ goto cleanup;
+
+ pid = cg_run_nowait(cgroup[0], child_fn, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_wait_for_proc_count(cgroup[0], 1))
+ goto cleanup;
+
+ /*
+ * Migrate from A (running) to B (frozen)
+ */
+ if (cg_freeze_wait(cgroup[1], true))
+ goto cleanup;
+
+ if (cg_enter_and_wait_for_frozen(cgroup[1], pid, true))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[0], false))
+ goto cleanup;
+
+ /*
+ * Migrate from B (frozen) to A (running)
+ */
+ if (cg_enter_and_wait_for_frozen(cgroup[0], pid, false))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[1], true))
+ goto cleanup;
+
+ /*
+ * Migrate from A (frozen) to B (frozen)
+ */
+ if (cg_freeze_wait(cgroup[0], true))
+ goto cleanup;
+
+ if (cg_enter_and_wait_for_frozen(cgroup[1], pid, true))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup[0], true))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup[0])
+ cg_destroy(cgroup[0]);
+ free(cgroup[0]);
+ if (cgroup[1])
+ cg_destroy(cgroup[1]);
+ free(cgroup[1]);
+ return ret;
+}
+
+/*
+ * The test checks that ptrace works with a traced process in a frozen cgroup.
+ */
+static int test_cgfreezer_ptrace(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ siginfo_t siginfo;
+ int pid;
+
+ cgroup = cg_name(root, "cg_test_ptrace");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ pid = cg_run_nowait(cgroup, child_fn, NULL);
+ if (pid < 0)
+ goto cleanup;
+
+ if (cg_wait_for_proc_count(cgroup, 1))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ if (ptrace(PTRACE_SEIZE, pid, NULL, NULL))
+ goto cleanup;
+
+ if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL))
+ goto cleanup;
+
+ waitpid(pid, NULL, 0);
+
+ /*
+ * The cgroup has to remain frozen even though the test task
+ * is now in the traced state.
+ */
+ if (cg_check_frozen(cgroup, true))
+ goto cleanup;
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo))
+ goto cleanup;
+
+ if (ptrace(PTRACE_DETACH, pid, NULL, NULL))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup, true))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * Check if the process is stopped.
+ */
+static int proc_check_stopped(int pid)
+{
+ char buf[PAGE_SIZE];
+ int len;
+
+ len = proc_read_text(pid, "stat", buf, sizeof(buf));
+ if (len == -1) {
+ debug("Can't get %d stat\n", pid);
+ return -1;
+ }
+
+ if (strstr(buf, "(test_freezer) T ") == NULL) {
+ debug("Process %d in the unexpected state: %s\n", pid, buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Test that it's possible to freeze a cgroup with a stopped process.
+ */
+static int test_cgfreezer_stopped(const char *root)
+{
+ int pid, ret = KSFT_FAIL;
+ char *cgroup = NULL;
+
+ cgroup = cg_name(root, "cg_test_stopped");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ pid = cg_run_nowait(cgroup, child_fn, NULL);
+
+ if (cg_wait_for_proc_count(cgroup, 1))
+ goto cleanup;
+
+ if (kill(pid, SIGSTOP))
+ goto cleanup;
+
+ if (cg_check_frozen(cgroup, false))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, false))
+ goto cleanup;
+
+ if (proc_check_stopped(pid))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+/*
+ * Test that it's possible to freeze a cgroup with a ptraced process.
+ */
+static int test_cgfreezer_ptraced(const char *root)
+{
+ int pid, ret = KSFT_FAIL;
+ char *cgroup = NULL;
+ siginfo_t siginfo;
+
+ cgroup = cg_name(root, "cg_test_ptraced");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ pid = cg_run_nowait(cgroup, child_fn, NULL);
+
+ if (cg_wait_for_proc_count(cgroup, 1))
+ goto cleanup;
+
+ if (ptrace(PTRACE_SEIZE, pid, NULL, NULL))
+ goto cleanup;
+
+ if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL))
+ goto cleanup;
+
+ waitpid(pid, NULL, 0);
+
+ if (cg_check_frozen(cgroup, false))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ /*
+ * cg_check_frozen(cgroup, true) will fail here,
+ * because the task is in the TRACED state.
+ */
+ if (cg_freeze_wait(cgroup, false))
+ goto cleanup;
+
+ if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo))
+ goto cleanup;
+
+ if (ptrace(PTRACE_DETACH, pid, NULL, NULL))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+static int vfork_fn(const char *cgroup, void *arg)
+{
+ int pid = vfork();
+
+ if (pid == 0)
+ while (true)
+ sleep(1);
+
+ return pid;
+}
+
+/*
+ * Test that it's possible to freeze a cgroup with a process that
+ * has called vfork() and is waiting for its child.
+ */
+static int test_cgfreezer_vfork(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *cgroup = NULL;
+
+ cgroup = cg_name(root, "cg_test_vfork");
+ if (!cgroup)
+ goto cleanup;
+
+ if (cg_create(cgroup))
+ goto cleanup;
+
+ cg_run_nowait(cgroup, vfork_fn, NULL);
+
+ if (cg_wait_for_proc_count(cgroup, 2))
+ goto cleanup;
+
+ if (cg_freeze_wait(cgroup, true))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (cgroup)
+ cg_destroy(cgroup);
+ free(cgroup);
+ return ret;
+}
+
+#define T(x) { x, #x }
+struct cgfreezer_test {
+ int (*fn)(const char *root);
+ const char *name;
+} tests[] = {
+ T(test_cgfreezer_simple),
+ T(test_cgfreezer_tree),
+ T(test_cgfreezer_forkbomb),
+ T(test_cgfreezer_rmdir),
+ T(test_cgfreezer_migrate),
+ T(test_cgfreezer_ptrace),
+ T(test_cgfreezer_stopped),
+ T(test_cgfreezer_ptraced),
+ T(test_cgfreezer_vfork),
+};
+#undef T
+
+int main(int argc, char *argv[])
+{
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+ if (cg_find_unified_root(root, sizeof(root)))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s\n", tests[i].name);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s\n", tests[i].name);
+ break;
+ default:
+ ret = EXIT_FAILURE;
+ ksft_test_result_fail("%s\n", tests[i].name);
+ break;
+ }
+ }
+
+ return ret;
+}
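
test_freezer.c drives everything through the cgroup_util helpers; the kernel interface underneath is just two control files, cgroup.freeze and cgroup.events. A minimal standalone sketch of that interface, assuming an already-created cgroup v2 directory is passed as argv[1]; unlike the test above, it simply polls cgroup.events instead of waiting for an inotify event:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[4096], buf[4096];
	FILE *f;
	int i;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <cgroup v2 directory>\n", argv[0]);
		return 1;
	}

	/* Ask the kernel to freeze every task in the cgroup. */
	snprintf(path, sizeof(path), "%s/cgroup.freeze", argv[1]);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	/* Poll cgroup.events until it reports "frozen 1". */
	snprintf(path, sizeof(path), "%s/cgroup.events", argv[1]);
	for (i = 0; i < 100; i++) {
		size_t len;

		f = fopen(path, "r");
		if (!f)
			return 1;
		len = fread(buf, 1, sizeof(buf) - 1, f);
		buf[len] = 0;
		fclose(f);

		if (strstr(buf, "frozen 1")) {
			puts("cgroup is frozen");
			return 0;
		}
		usleep(100 * 1000);	/* 100ms between reads */
	}

	fprintf(stderr, "cgroup did not freeze within 10 seconds\n");
	return 1;
}
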
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 28d321ba311b..c19a97dd02d4 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -26,7 +26,7 @@
*/
static int test_memcg_subtree_control(const char *root)
{
- char *parent, *child, *parent2, *child2;
+ char *parent, *child, *parent2 = NULL, *child2 = NULL;
int ret = KSFT_FAIL;
char buf[PAGE_SIZE];
@@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
if (!parent || !child)
- goto cleanup;
+ goto cleanup_free;
if (cg_create(parent))
- goto cleanup;
+ goto cleanup_free;
if (cg_write(parent, "cgroup.subtree_control", "+memory"))
- goto cleanup;
+ goto cleanup_parent;
if (cg_create(child))
- goto cleanup;
+ goto cleanup_parent;
if (cg_read_strstr(child, "cgroup.controllers", "memory"))
- goto cleanup;
+ goto cleanup_child;
/* Create two nested cgroups without enabling memory controller */
parent2 = cg_name(root, "memcg_test_1");
child2 = cg_name(root, "memcg_test_1/memcg_test_1");
if (!parent2 || !child2)
- goto cleanup;
+ goto cleanup_free2;
if (cg_create(parent2))
- goto cleanup;
+ goto cleanup_free2;
if (cg_create(child2))
- goto cleanup;
+ goto cleanup_parent2;
if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
- goto cleanup;
+ goto cleanup_all;
if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
- goto cleanup;
+ goto cleanup_all;
ret = KSFT_PASS;
-cleanup:
- cg_destroy(child);
- cg_destroy(parent);
- free(parent);
- free(child);
-
+cleanup_all:
cg_destroy(child2);
+cleanup_parent2:
cg_destroy(parent2);
+cleanup_free2:
free(parent2);
free(child2);
+cleanup_child:
+ cg_destroy(child);
+cleanup_parent:
+ cg_destroy(parent);
+cleanup_free:
+ free(parent);
+ free(child);
return ret;
}
@@ -1201,6 +1205,10 @@ int main(int argc, char **argv)
if (cg_read_strstr(root, "cgroup.controllers", "memory"))
ksft_exit_skip("memory controller isn't available\n");
+ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+ if (cg_write(root, "cgroup.subtree_control", "+memory"))
+ ksft_exit_skip("Failed to set memory controller\n");
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
diff --git a/tools/testing/selftests/drivers/.gitignore b/tools/testing/selftests/drivers/.gitignore
new file mode 100644
index 000000000000..f6aebcc27b76
--- /dev/null
+++ b/tools/testing/selftests/drivers/.gitignore
@@ -0,0 +1 @@
+/dma-buf/udmabuf
diff --git a/tools/testing/selftests/drivers/dma-buf/Makefile b/tools/testing/selftests/drivers/dma-buf/Makefile
index f22c3f7cf612..79cb16b4e01a 100644
--- a/tools/testing/selftests/drivers/dma-buf/Makefile
+++ b/tools/testing/selftests/drivers/dma-buf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -I../../../../../usr/include/
TEST_GEN_PROGS := udmabuf
diff --git a/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
new file mode 100755
index 000000000000..5ba5bef44d5b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/blackhole_routes.sh
@@ -0,0 +1,200 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that blackhole routes are marked as offloaded and that packets hitting
+# them are dropped by the ASIC and not by the kernel.
+#
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | + $rp1 |
+# | 192.0.2.2/24 |
+# | 2001:db8:1::2/64 |
+# | |
+# | 2001:db8:2::2/64 |
+# | 198.51.100.2/24 |
+# | + $rp2 |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+ blackhole_ipv4
+ blackhole_ipv6
+"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+
+ tc qdisc add dev $rp1 clsact
+
+ __addr_add_del $rp1 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del $rp2 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+router_destroy()
+{
+ __addr_add_del $rp2 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del $rp1 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $rp1 clsact
+
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.1 ": h1->h2"
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::1 ": h1->h2"
+}
+
+blackhole_ipv4()
+{
+ # Transmit packets from H1 to H2 and make sure they are dropped by the
+ # ASIC and not by the kernel
+ RET=0
+
+ ip -4 route add blackhole 198.51.100.0/30
+ tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 src_ip 192.0.2.1 ip_proto icmp \
+ action pass
+
+ ip -4 route show 198.51.100.0/30 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping_do $h1 198.51.100.1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv4 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+ ip -4 route del blackhole 198.51.100.0/30
+}
+
+blackhole_ipv6()
+{
+ RET=0
+
+ ip -6 route add blackhole 2001:db8:2::/120
+ tc filter add dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 src_ip 2001:db8:1::1 \
+ ip_proto icmpv6 action pass
+
+ ip -6 route show 2001:db8:2::/120 | grep -q offload
+ check_err $? "route not marked as offloaded when should"
+
+ ping6_do $h1 2001:db8:2::1
+ check_fail $? "ping passed when should not"
+
+ tc_check_packets "dev $rp1 ingress" 101 0
+ check_err $? "packets trapped and not dropped by ASIC"
+
+ log_test "IPv6 blackhole route"
+
+ tc filter del dev $rp1 ingress protocol ipv6 pref 1 handle 101 flower
+ ip -6 route del blackhole 2001:db8:2::/120
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ router_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ router_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
index 1ca631d5aaba..40f16f2a3afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh
@@ -148,9 +148,10 @@ dscp_ping_test()
eval "t0s=($(dscp_fetch_stats $dev_10 10)
$(dscp_fetch_stats $dev_20 20))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp_10 ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
local -A t1s
eval "t1s=($(dscp_fetch_stats $dev_10 10)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
index 281d90766e12..9faf02e32627 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh
@@ -169,9 +169,10 @@ dscp_ping_test()
eval "local -A dev1_t0s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t0s=($(dscp_fetch_stats $dev2 0))"
+ local ping_timeout=$((PING_TIMEOUT * 5))
ip vrf exec $vrf_name \
${PING} -Q $dscp ${sip:+-I $sip} $dip \
- -c 10 -i 0.1 -w 2 &> /dev/null
+ -c 10 -i 0.5 -w $ping_timeout &> /dev/null
eval "local -A dev1_t1s=($(dscp_fetch_stats $dev1 0))"
eval "local -A dev2_t1s=($(dscp_fetch_stats $dev2 0))"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
new file mode 100755
index 000000000000..6d1790b5de7a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
@@ -0,0 +1,311 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# A test for strict prioritization of traffic in the switch. Run two streams of
+# traffic, each through a different ingress port, one tagged with PCP of 1, the
+# other with PCP of 2. Both streams converge at one egress port, where they are
+# assigned TC of, respectively, 1 and 2, with strict priority configured between
+# them. In H3, we expect to see (almost) exclusively the high-priority traffic.
+#
+# Please see qos_mc_aware.sh for an explanation of why we use mausezahn and
+# counters instead of just running iperf3.
+#
+# +---------------------------+ +-----------------------------+
+# | H1 | | H2 |
+# | $h1.111 + | | + $h2.222 |
+# | 192.0.2.33/28 | | | | 192.0.2.65/28 |
+# | e-qos-map 0:1 | | | | e-qos-map 0:2 |
+# | | | | | |
+# | $h1 + | | + $h2 |
+# +-----------------|---------+ +---------|-------------------+
+# | |
+# +-----------------|-------------------------------------|-------------------+
+# | $swp1 + + $swp2 |
+# | >1Gbps | | >1Gbps |
+# | +---------------|-----------+ +----------|----------------+ |
+# | | $swp1.111 + | | + $swp2.222 | |
+# | | BR111 | SW | BR222 | |
+# | | $swp3.111 + | | + $swp3.222 | |
+# | +---------------|-----------+ +----------|----------------+ |
+# | \_____________________________________/ |
+# | | |
+# | + $swp3 |
+# | | 1Gbps bottleneck |
+# | | ETS: (up n->tc n for n in 0..7) |
+# | | strict priority |
+# +------------------------------------|--------------------------------------+
+# |
+# +--------------------|--------------------+
+# | + $h3 H3 |
+# | / \ |
+# | / \ |
+# | $h3.111 + + $h3.222 |
+# | 192.0.2.34/28 192.0.2.66/28 |
+# +-----------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ test_ets_strict
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ mtu_set $h1 10000
+
+ vlan_create $h1 111 v$h1 192.0.2.33/28
+ ip link set dev $h1.111 type vlan egress-qos-map 0:1
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 111
+
+ mtu_restore $h1
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ mtu_set $h2 10000
+
+ vlan_create $h2 222 v$h2 192.0.2.65/28
+ ip link set dev $h2.222 type vlan egress-qos-map 0:2
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 222
+
+ mtu_restore $h2
+ simple_if_fini $h2
+}
+
+h3_create()
+{
+ simple_if_init $h3
+ mtu_set $h3 10000
+
+ vlan_create $h3 111 v$h3 192.0.2.34/28
+ vlan_create $h3 222 v$h3 192.0.2.66/28
+}
+
+h3_destroy()
+{
+ vlan_destroy $h3 222
+ vlan_destroy $h3 111
+
+ mtu_restore $h3
+ simple_if_fini $h3
+}
+
+switch_create()
+{
+ ip link set dev $swp1 up
+ mtu_set $swp1 10000
+
+ ip link set dev $swp2 up
+ mtu_set $swp2 10000
+
+ # prio n -> TC n, strict scheduling
+ lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7
+ lldptool -T -i $swp3 -V ETS-CFG tsa=$(
+ )"0:strict,"$(
+ )"1:strict,"$(
+ )"2:strict,"$(
+ )"3:strict,"$(
+ )"4:strict,"$(
+ )"5:strict,"$(
+ )"6:strict,"$(
+ )"7:strict"
+ sleep 1
+
+ ip link set dev $swp3 up
+ mtu_set $swp3 10000
+ ethtool -s $swp3 speed 1000 autoneg off
+
+ vlan_create $swp1 111
+ vlan_create $swp2 222
+ vlan_create $swp3 111
+ vlan_create $swp3 222
+
+ ip link add name br111 up type bridge vlan_filtering 0
+ ip link set dev $swp1.111 master br111
+ ip link set dev $swp3.111 master br111
+
+ ip link add name br222 up type bridge vlan_filtering 0
+ ip link set dev $swp2.222 master br222
+ ip link set dev $swp3.222 master br222
+
+ # Make sure that ingress quotas are smaller than egress so that there is
+ # room for both streams of traffic to be admitted to the shared buffer.
+ devlink_pool_size_thtype_set 0 dynamic 10000000
+ devlink_pool_size_thtype_set 4 dynamic 10000000
+
+ devlink_port_pool_th_set $swp1 0 6
+ devlink_tc_bind_pool_th_set $swp1 1 ingress 0 6
+
+ devlink_port_pool_th_set $swp2 0 6
+ devlink_tc_bind_pool_th_set $swp2 2 ingress 0 6
+
+ devlink_tc_bind_pool_th_set $swp3 1 egress 4 7
+ devlink_tc_bind_pool_th_set $swp3 2 egress 4 7
+ devlink_port_pool_th_set $swp3 4 7
+}
+
+switch_destroy()
+{
+ devlink_port_pool_th_restore $swp3 4
+ devlink_tc_bind_pool_th_restore $swp3 2 egress
+ devlink_tc_bind_pool_th_restore $swp3 1 egress
+
+ devlink_tc_bind_pool_th_restore $swp2 2 ingress
+ devlink_port_pool_th_restore $swp2 0
+
+ devlink_tc_bind_pool_th_restore $swp1 1 ingress
+ devlink_port_pool_th_restore $swp1 0
+
+ devlink_pool_size_thtype_restore 4
+ devlink_pool_size_thtype_restore 0
+
+ ip link del dev br222
+ ip link del dev br111
+
+ vlan_destroy $swp3 222
+ vlan_destroy $swp3 111
+ vlan_destroy $swp2 222
+ vlan_destroy $swp1 111
+
+ ethtool -s $swp3 autoneg on
+ mtu_restore $swp3
+ ip link set dev $swp3 down
+ lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0
+
+ mtu_restore $swp2
+ ip link set dev $swp2 down
+
+ mtu_restore $swp1
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ h3mac=$(mac_get $h3)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34 " from H1"
+ ping_test $h2 192.0.2.66 " from H2"
+}
+
+rel()
+{
+ local old=$1; shift
+ local new=$1; shift
+
+ bc <<< "
+ scale=2
+ ret = 100 * $new / $old
+ if (ret > 0) { ret } else { 0 }
+ "
+}
+
+test_ets_strict()
+{
+ RET=0
+
+ # Run high-prio traffic on its own.
+ start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
+ local -a rate_2
+ rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2"))
+ check_err $? "Could not get high enough prio-2 ingress rate"
+ local rate_2_in=${rate_2[0]}
+ local rate_2_eg=${rate_2[1]}
+ stop_traffic # $h2.222
+
+ # Start low-prio stream.
+ start_traffic $h1.111 192.0.2.33 192.0.2.34 $h3mac
+
+ local -a rate_1
+ rate_1=($(measure_rate $swp1 $h3 rx_octets_prio_1 "prio 1"))
+ check_err $? "Could not get high enough prio-1 ingress rate"
+ local rate_1_in=${rate_1[0]}
+ local rate_1_eg=${rate_1[1]}
+
+ # High-prio and low-prio on their own should have about the same
+ # throughput.
+ local rel21=$(rel $rate_1_eg $rate_2_eg)
+ check_err $(bc <<< "$rel21 < 95")
+ check_err $(bc <<< "$rel21 > 105")
+
+ # Start the high-prio stream--now both streams run.
+ start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
+ rate_3=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2 w/ 1"))
+ check_err $? "Could not get high enough prio-2 ingress rate with prio-1"
+ local rate_3_in=${rate_3[0]}
+ local rate_3_eg=${rate_3[1]}
+ stop_traffic # $h2.222
+
+ stop_traffic # $h1.111
+
+ # High-prio should have about the same throughput whether or not
+ # low-prio is in the system.
+ local rel32=$(rel $rate_2_eg $rate_3_eg)
+ check_err $(bc <<< "$rel32 < 95")
+
+ log_test "strict priority"
+ echo "Ingress to switch:"
+ echo " p1 in rate $(humanize $rate_1_in)"
+ echo " p2 in rate $(humanize $rate_2_in)"
+ echo " p2 in rate w/ p1 $(humanize $rate_3_in)"
+ echo "Egress from switch:"
+ echo " p1 eg rate $(humanize $rate_1_eg)"
+ echo " p2 eg rate $(humanize $rate_2_eg) ($rel21% of p1)"
+ echo " p2 eg rate w/ p1 $(humanize $rate_3_eg) ($rel32% of p2)"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
new file mode 100644
index 000000000000..e80be65799ad
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0
+
+humanize()
+{
+ local speed=$1; shift
+
+ for unit in bps Kbps Mbps Gbps; do
+ if (($(echo "$speed < 1024" | bc))); then
+ break
+ fi
+
+ speed=$(echo "scale=1; $speed / 1024" | bc)
+ done
+
+ echo "$speed${unit}"
+}
+
+rate()
+{
+ local t0=$1; shift
+ local t1=$1; shift
+ local interval=$1; shift
+
+ echo $((8 * (t1 - t0) / interval))
+}
+
+start_traffic()
+{
+ local h_in=$1; shift # Where the traffic egresses the host
+ local sip=$1; shift
+ local dip=$1; shift
+ local dmac=$1; shift
+
+ $MZ $h_in -p 8000 -A $sip -B $dip -c 0 \
+ -a own -b $dmac -t udp -q &
+ sleep 1
+}
+
+stop_traffic()
+{
+ # Suppress noise from killing mausezahn.
+ { kill %% && wait %%; } 2>/dev/null
+}
+
+check_rate()
+{
+ local rate=$1; shift
+ local min=$1; shift
+ local what=$1; shift
+
+ if ((rate > min)); then
+ return 0
+ fi
+
+ echo "$what $(humanize $ir) < $(humanize $min)" > /dev/stderr
+ return 1
+}
+
+measure_rate()
+{
+ local sw_in=$1; shift # Where the traffic ingresses the switch
+ local host_in=$1; shift # Where it ingresses another host
+ local counter=$1; shift # Counter to use for measurement
+ local what=$1; shift
+
+ local interval=10
+ local i
+ local ret=0
+
+ # Dips in performance might cause momentary ingress rate to drop below
+ # 1Gbps. That wouldn't saturate egress and MC would thus get through,
+ # seemingly winning bandwidth on account of UC. Demand at least 2Gbps
+ # average ingress rate to somewhat mitigate this.
+ local min_ingress=2147483648
+
+ for i in {5..0}; do
+ local t0=$(ethtool_stats_get $host_in $counter)
+ local u0=$(ethtool_stats_get $sw_in $counter)
+ sleep $interval
+ local t1=$(ethtool_stats_get $host_in $counter)
+ local u1=$(ethtool_stats_get $sw_in $counter)
+
+ local ir=$(rate $u0 $u1 $interval)
+ local er=$(rate $t0 $t1 $interval)
+
+ if check_rate $ir $min_ingress "$what ingress rate"; then
+ break
+ fi
+
+ # Fail the test if we can't get the throughput.
+ if ((i == 0)); then
+ ret=1
+ fi
+ done
+
+ echo $ir $er
+ return $ret
+}
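
rate() and humanize() above turn two octet counters sampled an interval apart into a human-readable bit rate. The same arithmetic in C, for reference; the sample counter values are invented:

#include <stdio.h>
#include <stdint.h>

/* Bits per second from two octet counters taken `interval` seconds apart. */
static uint64_t rate_bps(uint64_t t0, uint64_t t1, uint64_t interval)
{
	return 8 * (t1 - t0) / interval;
}

/* Mirror of humanize(): scale by 1024 through bps/Kbps/Mbps/Gbps. */
static void humanize_rate(uint64_t bps, char *buf, size_t len)
{
	static const char * const units[] = { "bps", "Kbps", "Mbps", "Gbps" };
	double v = bps;
	int i;

	for (i = 0; i < 3 && v >= 1024; i++)
		v /= 1024;
	snprintf(buf, len, "%.1f%s", v, units[i]);
}

int main(void)
{
	char buf[32];

	/* e.g. 2.5 GB of payload observed over a 10 second window */
	humanize_rate(rate_bps(0, 2500000000ULL, 10), buf, sizeof(buf));
	printf("ingress rate: %s\n", buf);
	return 0;
}
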
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index 117f6f35d72f..71231ad2dbfb 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -67,6 +67,8 @@ lib_dir=$(dirname $0)/../../../net/forwarding
NUM_NETIFS=6
source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
h1_create()
{
@@ -140,10 +142,28 @@ switch_create()
ip link set dev br111 up
ip link set dev $swp2.111 master br111
ip link set dev $swp3.111 master br111
+
+ # Make sure that ingress quotas are smaller than egress so that there is
+ # room for both streams of traffic to be admitted to the shared buffer.
+ devlink_port_pool_th_set $swp1 0 5
+ devlink_tc_bind_pool_th_set $swp1 0 ingress 0 5
+
+ devlink_port_pool_th_set $swp2 0 5
+ devlink_tc_bind_pool_th_set $swp2 1 ingress 0 5
+
+ devlink_port_pool_th_set $swp3 4 12
}
switch_destroy()
{
+ devlink_port_pool_th_restore $swp3 4
+
+ devlink_tc_bind_pool_th_restore $swp2 1 ingress
+ devlink_port_pool_th_restore $swp2 0
+
+ devlink_tc_bind_pool_th_restore $swp1 0 ingress
+ devlink_port_pool_th_restore $swp1 0
+
ip link del dev br111
ip link del dev br1
@@ -201,107 +221,28 @@ ping_ipv4()
ping_test $h2 192.0.2.130
}
-humanize()
-{
- local speed=$1; shift
-
- for unit in bps Kbps Mbps Gbps; do
- if (($(echo "$speed < 1024" | bc))); then
- break
- fi
-
- speed=$(echo "scale=1; $speed / 1024" | bc)
- done
-
- echo "$speed${unit}"
-}
-
-rate()
-{
- local t0=$1; shift
- local t1=$1; shift
- local interval=$1; shift
-
- echo $((8 * (t1 - t0) / interval))
-}
-
-check_rate()
-{
- local rate=$1; shift
- local min=$1; shift
- local what=$1; shift
-
- if ((rate > min)); then
- return 0
- fi
-
- echo "$what $(humanize $ir) < $(humanize $min_ingress)" > /dev/stderr
- return 1
-}
-
-measure_uc_rate()
-{
- local what=$1; shift
-
- local interval=10
- local i
- local ret=0
-
- # Dips in performance might cause momentary ingress rate to drop below
- # 1Gbps. That wouldn't saturate egress and MC would thus get through,
- # seemingly winning bandwidth on account of UC. Demand at least 2Gbps
- # average ingress rate to somewhat mitigate this.
- local min_ingress=2147483648
-
- $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
- -a own -b $h3mac -t udp -q &
- sleep 1
-
- for i in {5..0}; do
- local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
- local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1)
- sleep $interval
- local t1=$(ethtool_stats_get $h3 rx_octets_prio_1)
- local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1)
-
- local ir=$(rate $u0 $u1 $interval)
- local er=$(rate $t0 $t1 $interval)
-
- if check_rate $ir $min_ingress "$what ingress rate"; then
- break
- fi
-
- # Fail the test if we can't get the throughput.
- if ((i == 0)); then
- ret=1
- fi
- done
-
- # Suppress noise from killing mausezahn.
- { kill %% && wait; } 2>/dev/null
-
- echo $ir $er
- exit $ret
-}
-
test_mc_aware()
{
RET=0
local -a uc_rate
- uc_rate=($(measure_uc_rate "UC-only"))
+ start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
+ uc_rate=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC-only"))
check_err $? "Could not get high enough UC-only ingress rate"
+ stop_traffic
local ucth1=${uc_rate[1]}
- $MZ $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
+ start_traffic $h1 own bc bc
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
local u0=$(ethtool_stats_get $swp1 rx_octets_prio_0)
local -a uc_rate_2
- uc_rate_2=($(measure_uc_rate "UC+MC"))
+ start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
+ uc_rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_1 "UC+MC"))
check_err $? "Could not get high enough UC+MC ingress rate"
+ stop_traffic
local ucth2=${uc_rate_2[1]}
local d1=$(date +%s)
@@ -319,8 +260,7 @@ test_mc_aware()
local mc_ir=$(rate $u0 $u1 $interval)
local mc_er=$(rate $t0 $t1 $interval)
- # Suppress noise from killing mausezahn.
- { kill %% && wait; } 2>/dev/null
+ stop_traffic
log_test "UC performace under MC overload"
@@ -344,8 +284,7 @@ test_uc_aware()
{
RET=0
- $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
- -a own -b $h3mac -t udp -q &
+ start_traffic $h2.111 192.0.2.129 192.0.2.130 $h3mac
local d0=$(date +%s)
local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
@@ -375,8 +314,7 @@ test_uc_aware()
((attempts == passes))
check_err $?
- # Suppress noise from killing mausezahn.
- { kill %% && wait; } 2>/dev/null
+ stop_traffic
log_test "MC performace under UC overload"
echo " ingress UC throughput $(humanize ${uc_ir})"
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index c4cf6e6d800e..5c39e5f6a480 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -11,6 +11,7 @@ lib_dir=$(dirname $0)/../../../net/forwarding
ALL_TESTS="
rif_set_addr_test
+ rif_vrf_set_addr_test
rif_inherit_bridge_addr_test
rif_non_inherit_bridge_addr_test
vlan_interface_deletion_test
@@ -26,6 +27,8 @@ ALL_TESTS="
lag_dev_deletion_test
vlan_interface_uppers_test
bridge_extern_learn_test
+ neigh_offload_test
+ nexthop_offload_test
devlink_reload_test
"
NUM_NETIFS=2
@@ -98,6 +101,25 @@ rif_set_addr_test()
ip link set dev $swp1 addr $swp1_mac
}
+rif_vrf_set_addr_test()
+{
+ # Test that it is possible to set an IP address on a VRF upper despite
+ # its random MAC address.
+ RET=0
+
+ ip link add name vrf-test type vrf table 10
+ ip link set dev $swp1 master vrf-test
+
+ ip -4 address add 192.0.2.1/24 dev vrf-test
+ check_err $? "failed to set IPv4 address on VRF"
+ ip -6 address add 2001:db8:1::1/64 dev vrf-test
+ check_err $? "failed to set IPv6 address on VRF"
+
+ log_test "RIF - setting IP address on VRF"
+
+ ip link del dev vrf-test
+}
+
rif_inherit_bridge_addr_test()
{
RET=0
@@ -561,6 +583,77 @@ bridge_extern_learn_test()
ip link del dev br0
}
+neigh_offload_test()
+{
+ # Test that IPv4 and IPv6 neighbour entries are marked as offloaded
+ RET=0
+
+ ip -4 address add 192.0.2.1/24 dev $swp1
+ ip -6 address add 2001:db8:1::1/64 dev $swp1
+
+ ip -4 neigh add 192.0.2.2 lladdr de:ad:be:ef:13:37 nud perm dev $swp1
+ ip -6 neigh add 2001:db8:1::2 lladdr de:ad:be:ef:13:37 nud perm \
+ dev $swp1
+
+ ip -4 neigh show dev $swp1 | grep 192.0.2.2 | grep -q offload
+ check_err $? "ipv4 neigh entry not marked as offloaded when should"
+ ip -6 neigh show dev $swp1 | grep 2001:db8:1::2 | grep -q offload
+ check_err $? "ipv6 neigh entry not marked as offloaded when should"
+
+ log_test "neighbour offload indication"
+
+ ip -6 neigh del 2001:db8:1::2 dev $swp1
+ ip -4 neigh del 192.0.2.2 dev $swp1
+ ip -6 address del 2001:db8:1::1/64 dev $swp1
+ ip -4 address del 192.0.2.1/24 dev $swp1
+}
+
+nexthop_offload_test()
+{
+ # Test that IPv4 and IPv6 nexthops are marked as offloaded
+ RET=0
+
+ sysctl_set net.ipv6.conf.$swp2.keep_addr_on_down 1
+ simple_if_init $swp1 192.0.2.1/24 2001:db8:1::1/64
+ simple_if_init $swp2 192.0.2.2/24 2001:db8:1::2/64
+ setup_wait
+
+ ip -4 route add 198.51.100.0/24 vrf v$swp1 \
+ nexthop via 192.0.2.2 dev $swp1
+ ip -6 route add 2001:db8:2::/64 vrf v$swp1 \
+ nexthop via 2001:db8:1::2 dev $swp1
+
+ ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+ check_err $? "ipv4 nexthop not marked as offloaded when should"
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+ check_err $? "ipv6 nexthop not marked as offloaded when should"
+
+ ip link set dev $swp2 down
+ sleep 1
+
+ ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+ check_fail $? "ipv4 nexthop marked as offloaded when should not"
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+ check_fail $? "ipv6 nexthop marked as offloaded when should not"
+
+ ip link set dev $swp2 up
+ setup_wait
+
+ ip -4 route show 198.51.100.0/24 vrf v$swp1 | grep -q offload
+ check_err $? "ipv4 nexthop not marked as offloaded after neigh add"
+ ip -6 route show 2001:db8:2::/64 vrf v$swp1 | grep -q offload
+ check_err $? "ipv6 nexthop not marked as offloaded after neigh add"
+
+ log_test "nexthop offload indication"
+
+ ip -6 route del 2001:db8:2::/64 vrf v$swp1
+ ip -4 route del 198.51.100.0/24 vrf v$swp1
+
+ simple_if_fini $swp2 192.0.2.2/24 2001:db8:1::2/64
+ simple_if_fini $swp1 192.0.2.1/24 2001:db8:1::1/64
+ sysctl_restore net.ipv6.conf.$swp2.keep_addr_on_down
+}
+
devlink_reload_test()
{
# Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index b41d6256b2d0..fb850e0ec837 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -9,10 +9,12 @@ lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ delta_two_masks_one_key_test delta_simple_rehash_test \
bloom_simple_test bloom_complex_test bloom_delta_test"
NUM_NETIFS=2
-source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
+source $lib_dir/devlink_lib.sh
tcflags="skip_hw"
@@ -38,6 +40,55 @@ h2_destroy()
simple_if_fini $h2 192.0.2.2/24 198.51.100.2/24
}
+tp_record()
+{
+ local tracepoint=$1
+ local cmd=$2
+
+ perf record -q -e $tracepoint $cmd
+ return $?
+}
+
+tp_record_all()
+{
+ local tracepoint=$1
+ local seconds=$2
+
+ perf record -a -q -e $tracepoint sleep $seconds
+ return $?
+}
+
+__tp_hit_count()
+{
+ local tracepoint=$1
+
+ local perf_output=`perf script -F trace:event,trace`
+ return `echo $perf_output | grep "$tracepoint:" | wc -l`
+}
+
+tp_check_hits()
+{
+ local tracepoint=$1
+ local count=$2
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -ne "$count" ]]; then
+ return 1
+ fi
+ return 0
+}
+
+tp_check_hits_any()
+{
+ local tracepoint=$1
+
+ __tp_hit_count $tracepoint
+ if [[ "$?" -eq "0" ]]; then
+ return 1
+ fi
+ return 0
+}
+
single_mask_test()
{
# When only a single mask is required, the device uses the master
@@ -182,20 +233,38 @@ multiple_masks_test()
# spillage is performed correctly and that the right filter is
# matched
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
local index
RET=0
NUM_MASKS=32
+ NUM_ERPS=16
BASE_INDEX=100
for i in $(eval echo {1..$NUM_MASKS}); do
index=$((BASE_INDEX - i))
- tc filter add dev $h2 ingress protocol ip pref $index \
- handle $index \
- flower $tcflags dst_ip 192.0.2.2/${i} src_ip 192.0.2.1 \
- action drop
+ if ((i > NUM_ERPS)); then
+ exp_hits=1
+ err_msg="$i filters - C-TCAM spill did not happen when it was expected"
+ else
+ exp_hits=0
+ err_msg="$i filters - C-TCAM spill happened when it should not"
+ fi
+
+ tp_record "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ "tc filter add dev $h2 ingress protocol ip pref $index \
+ handle $index \
+ flower $tcflags \
+ dst_ip 192.0.2.2/${i} src_ip 192.0.2.1/${i} \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" \
+ $exp_hits
+ check_err $? "$err_msg"
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 \
-B 192.0.2.2 -t ip -q
@@ -325,28 +394,6 @@ ctcam_edge_cases_test()
ctcam_no_atcam_masks_test
}
-tp_record()
-{
- local tracepoint=$1
- local cmd=$2
-
- perf record -q -e $tracepoint $cmd
- return $?
-}
-
-tp_check_hits()
-{
- local tracepoint=$1
- local count=$2
-
- perf_output=`perf script -F trace:event,trace`
- hits=`echo $perf_output | grep "$tracepoint:" | wc -l`
- if [[ "$count" -ne "$hits" ]]; then
- return 1
- fi
- return 0
-}
-
delta_simple_test()
{
# The first filter will create eRP, the second filter will fit into
@@ -405,6 +452,365 @@ delta_simple_test()
log_test "delta simple test ($tcflags)"
}
+delta_two_masks_one_key_test()
+{
+ # If 2 keys are the same and only differ in mask in a way that
+ # they belong under the same ERP (second is delta of the first),
+ # there should be no C-TCAM spill.
+
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 1 handle 101 flower $tcflags dst_ip 192.0.2.0/24 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the first rule"
+
+ tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ action drop"
+ tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0
+ check_err $? "incorrect C-TCAM spill while inserting the second rule"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "delta two masks one key test ($tcflags)"
+}
+
+delta_simple_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.1.0/25 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.3.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "delta simple rehash test ($tcflags)"
+}
+
+delta_simple_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_err $? "Migrate trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_err $? "Migrate end trace was not hit"
+ tp_record_all mlxsw:* 3
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_err $? "Rehash trace was not hit"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate
+ check_fail $? "Migrate trace was hit when no migration should happen"
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_migrate_end
+ check_fail $? "Migrate end trace was hit when no migration should happen"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ log_test "delta simple IPv6 rehash test ($tcflags)"
+}
+
+TEST_RULE_BASE=256
+declare -a test_rules_inserted
+
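+# test_rule_add IFACE TCFLAGS INDEX
+# Queue a flower rule (handle TEST_RULE_BASE + INDEX) for insertion via a
+# later "tc -b -" batch and mark the index as inserted.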
+test_rule_add()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter add dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower $tcflags \
+ src_ip 2001:db8:1::$hexnumber action drop\n"
+ test_rules_inserted[$index]=true
+}
+
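+# test_rule_del IFACE INDEX
+# Queue deletion of the rule at INDEX in the batch and clear its inserted flag.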
+test_rule_del()
+{
+ local iface=$1
+ local index=$2
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ! ${test_rules_inserted[$index]} ; then
+ return
+ fi
+
+ local number=$(( $index + $TEST_RULE_BASE ))
+ printf -v hexnumber '%x' $number
+
+ batch="${batch}filter del dev $iface ingress protocol ipv6 pref 1 \
+ handle $number flower\n"
+ test_rules_inserted[$index]=false
+}
+
+test_rule_add_or_remove()
+{
+ local iface=$1
+ local tcflags=$2
+ local index=$3
+
+ if ! [ ${test_rules_inserted[$index]} ] ; then
+ test_rules_inserted[$index]=false
+ fi
+ if ${test_rules_inserted[$index]} ; then
+ test_rule_del $iface $index
+ else
+ test_rule_add $iface $tcflags $index
+ fi
+}
+
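+# Walk indices 1..total_count, alternating between skipping a random run of
+# up to MAXSKIP indices and toggling (add or remove) a random run of up to
+# MAXCOUNT indices, to build a pseudo-random batch of rule changes.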
+test_rule_add_or_remove_random_batch()
+{
+ local iface=$1
+ local tcflags=$2
+ local total_count=$3
+ local skip=0
+ local count=0
+ local MAXSKIP=20
+ local MAXCOUNT=20
+
+ for ((i=1;i<=total_count;i++)); do
+ if (( $skip == 0 )) && (($count == 0)); then
+ ((skip=$RANDOM % $MAXSKIP + 1))
+ ((count=$RANDOM % $MAXCOUNT + 1))
+ fi
+ if (( $skip != 0 )); then
+ ((skip-=1))
+ else
+ ((count-=1))
+ test_rule_add_or_remove $iface $tcflags $i
+ fi
+ done
+}
+
+delta_massive_ipv6_rehash_test()
+{
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0;
+ fi
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ tp_record_all mlxsw:mlxsw_sp_acl_tcam_vregion_rehash 7
+ tp_check_hits_any mlxsw:mlxsw_sp_acl_tcam_vregion_rehash
+ check_fail $? "Rehash trace was hit even when rehash should be disabled"
+
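+ # Seed $RANDOM deterministically and build two pseudo-random batches of
+ # rule changes: the first is applied while rehash is still disabled, the
+ # second later on, while the periodic rehash is running, to exercise
+ # migration of a large, changing rule set.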
+ RANDOM=4432897
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ echo -n -e $batch | tc -b -
+
+ declare batch=""
+ test_rule_add_or_remove_random_batch $h2 $tcflags 5000
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 3000
+ check_err $? "Failed to set ACL region rehash interval"
+
+ sleep 1
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 101 flower \
+ $tcflags dst_ip 2001:db8:1::0/121 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 2 handle 102 flower \
+ $tcflags dst_ip 2001:db8:2::2 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 3 handle 103 flower \
+ $tcflags dst_ip 2001:db8:3::0/120 action drop
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ echo -n -e $batch | tc -b -
+
+ devlink dev param set $DEVLINK_DEV \
+ name acl_region_rehash_interval cmode runtime value 0
+ check_err $? "Failed to set ACL region rehash interval"
+
+ $MZ $h1 -6 -c 1 -p 64 -a $h1mac -b $h2mac \
+ -A 2001:db8:2::1 -B 2001:db8:2::2 -t udp -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_fail $? "Matched a wrong filter after rehash"
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after rehash"
+
+ tc filter del dev $h2 ingress protocol ipv6 pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 101 flower
+
+ declare batch=""
+ for i in {1..5000}; do
+ test_rule_del $h2 $i
+ done
+ echo -e $batch | tc -b -
+
+ log_test "delta massive IPv6 rehash test ($tcflags)"
+}
+
bloom_simple_test()
{
# Bloom filter requires that the eRP table is used. This test
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
index b1fe960e398a..6f2683cbc7d5 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh
@@ -1,7 +1,10 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+lib_dir=$(dirname $0)/../../../../net/forwarding
+
NUM_NETIFS=1
+source $lib_dir/lib.sh
source devlink_lib_spectrum.sh
setup_prepare()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index a0a80e1a69e8..43ba1b438f6d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -1,9 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+lib_dir=$(dirname $0)/../../../../net/forwarding
+
NUM_NETIFS=6
-source ../../../../net/forwarding/lib.sh
-source ../../../../net/forwarding/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/tc_common.sh
source devlink_lib_spectrum.sh
current_test=""
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
new file mode 100755
index 000000000000..749ba3cfda1d
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh
@@ -0,0 +1,126 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test vetoing of FDB entries that mlxsw cannot offload. This exercises several
+# different veto vectors to test various rollback scenarios in the vxlan driver.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ fdb_create_veto_test
+ fdb_replace_veto_test
+ fdb_append_veto_test
+ fdb_changelink_veto_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link set dev $swp1 up
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 up
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link set dev vxlan0 master br0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev vxlan0 nomaster
+ ip link del dev vxlan0
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 nomaster
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+fdb_create_veto_test()
+{
+ RET=0
+
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>/dev/null
+ check_fail $? "multicast MAC not rejected"
+
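+ # Repeat the rejected command with only stderr piped (2>&1 >/dev/null)
+ # to verify the veto carries an extack message from mlxsw_spectrum.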
+ bridge fdb add 01:02:03:04:05:06 dev vxlan0 self static \
+ dst 198.51.100.2 2>&1 >/dev/null | grep -q mlxsw_spectrum
+ check_err $? "multicast MAC rejected without extack"
+
+ log_test "vxlan FDB veto - create"
+}
+
+fdb_replace_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb replace 00:01:02:03:04:05 dev vxlan0 self static \
+ dst 198.51.100.2 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - replace"
+}
+
+fdb_append_veto_test()
+{
+ RET=0
+
+ bridge fdb add 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.2
+ check_err $? "valid FDB rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>/dev/null
+ check_fail $? "FDB with an explicit port not rejected"
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self static \
+ dst 198.51.100.3 port 1234 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with an explicit port rejected without extack"
+
+ log_test "vxlan FDB veto - append"
+}
+
+fdb_changelink_veto_test()
+{
+ RET=0
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>/dev/null
+ check_fail $? "FDB with a multicast IP not rejected"
+
+ ip link set dev vxlan0 type vxlan \
+ group 224.0.0.1 dev lo 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "FDB with a multicast IP rejected without extack"
+
+ log_test "vxlan FDB veto - changelink"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/efivarfs/Makefile b/tools/testing/selftests/efivarfs/Makefile
index c49dcea69319..e3181338ba5e 100644
--- a/tools/testing/selftests/efivarfs/Makefile
+++ b/tools/testing/selftests/efivarfs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS = -Wall
TEST_GEN_FILES := open-unlink create-read
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index a47029a799d2..a90f394f9aa9 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -7,6 +7,12 @@ test_guid=210be57c-9849-4fc7-a635-e6382d1aec27
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+file_cleanup()
+{
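+ # efivarfs files are created with the immutable attribute; clear it
+ # before removing the variable.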
+ chattr -i $1
+ rm -f $1
+}
+
check_prereqs()
{
local msg="skip all tests:"
@@ -58,8 +64,10 @@ test_create()
if [ $(stat -c %s $file) -ne 5 ]; then
echo "$file has invalid size" >&2
+ file_cleanup $file
exit 1
fi
+ file_cleanup $file
}
test_create_empty()
@@ -72,12 +80,14 @@ test_create_empty()
echo "$file can not be created without writing" >&2
exit 1
fi
+ file_cleanup $file
}
test_create_read()
{
local file=$efivarfs_mount/$FUNCNAME-$test_guid
./create-read $file
+ file_cleanup $file
}
test_delete()
@@ -92,11 +102,7 @@ test_delete()
exit 1
fi
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
if [ -e $file ]; then
echo "$file couldn't be deleted" >&2
@@ -150,11 +156,7 @@ test_valid_filenames()
echo "$file could not be created" >&2
ret=1
else
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
fi
done
@@ -187,11 +189,7 @@ test_invalid_filenames()
if [ -e $file ]; then
echo "Creating $file should have failed" >&2
- rm $file 2>/dev/null
- if [ $? -ne 0 ]; then
- chattr -i $file
- rm $file
- fi
+ file_cleanup $file
ret=1
fi
done
diff --git a/tools/testing/selftests/exec/.gitignore b/tools/testing/selftests/exec/.gitignore
index 64073e050c6a..b02279da6fa1 100644
--- a/tools/testing/selftests/exec/.gitignore
+++ b/tools/testing/selftests/exec/.gitignore
@@ -6,4 +6,5 @@ execveat.moved
execveat.path.ephemeral
execveat.ephemeral
execveat.denatured
-xxxxxxxx* \ No newline at end of file
+/recursion-depth
+xxxxxxxx*
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 427c41ba5151..33339e31e365 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS = -Wall
+CFLAGS += -Wno-nonnull
+CFLAGS += -D_GNU_SOURCE
TEST_GEN_PROGS := execveat
TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
+TEST_GEN_PROGS += recursion-depth
+
EXTRA_CLEAN := $(OUTPUT)/subdir.moved $(OUTPUT)/execveat.moved $(OUTPUT)/xxxxx*
include ../lib.mk
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 47cbf54d0801..cbb6efbdb786 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Google, Inc.
*
- * Licensed under the terms of the GNU GPL License version 2
- *
* Selftests for execveat(2).
*/
diff --git a/tools/testing/selftests/exec/recursion-depth.c b/tools/testing/selftests/exec/recursion-depth.c
new file mode 100644
index 000000000000..2dbd5bc45b3e
--- /dev/null
+++ b/tools/testing/selftests/exec/recursion-depth.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that a #! script whose interpreter is the script itself doesn't recurse. */
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+int main(void)
+{
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 4;
+ }
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 1;
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ fprintf(stderr, "error: mount '/', errno %d\n", errno);
+ return 1;
+ }
+ /* Mount ramfs over /tmp so the test file is not on a filesystem mounted noexec. */
+ if (mount(NULL, "/tmp", "ramfs", 0, NULL) == -1) {
+ fprintf(stderr, "error: mount ramfs, errno %d\n", errno);
+ return 1;
+ }
+
+#define FILENAME "/tmp/1"
+
+ int fd = creat(FILENAME, 0700);
+ if (fd == -1) {
+ fprintf(stderr, "error: creat, errno %d\n", errno);
+ return 1;
+ }
+#define S "#!" FILENAME "\n"
+ if (write(fd, S, strlen(S)) != strlen(S)) {
+ fprintf(stderr, "error: write, errno %d\n", errno);
+ return 1;
+ }
+ close(fd);
+
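+ /*
+  * The script names itself as its interpreter; the kernel must detect
+  * the recursion and fail the execve() with ELOOP instead of looping.
+  */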
+ int rv = execve(FILENAME, NULL, NULL);
+ if (rv == -1 && errno == ELOOP) {
+ return 0;
+ }
+ fprintf(stderr, "error: execve, rv %d, errno %d\n", rv, errno);
+ return 1;
+}
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 261c81f08606..012b2cf69c11 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for firmware loading selftests
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
index 913a25a4a32b..bf634dda0720 100644
--- a/tools/testing/selftests/firmware/config
+++ b/tools/testing/selftests/firmware/config
@@ -1,6 +1,5 @@
CONFIG_TEST_FIRMWARE=y
CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_USER_HELPER=y
-CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 466cf2f91ba0..a4320c4b44dc 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -155,8 +155,11 @@ read_firmwares()
{
for i in $(seq 0 3); do
config_set_read_fw_idx $i
- # Verify the contents match
- if ! diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ # Verify the contents are what we expect.
+ # -Z is required for now -- check for yourself: md5sum on $FW and
+ # $DIR/read_firmware yields the same digest, and even cmp agrees,
+ # so something is off.
+ if ! diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request #$i: firmware was not loaded" >&2
exit 1
fi
@@ -168,7 +171,7 @@ read_firmwares_expect_nofile()
for i in $(seq 0 3); do
config_set_read_fw_idx $i
# Ensures contents differ
- if diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
+ if diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
echo "request $i: file was not expected to match" >&2
exit 1
fi
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2ffb74..1cbb12e284a6 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@ verify_reqs()
if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "usermode helper disabled so ignoring test"
- exit $ksft_skip
+ exit 0
fi
fi
}
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 75244db70331..6d5e9e87c4b7 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -1,11 +1,11 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# ftracetest - Ftrace test shell scripts
#
# Copyright (C) Hitachi Ltd., 2014
# Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
#
-# Released under the terms of the GPL v2.
usage() { # errno [message]
[ ! -z "$2" ] && echo $2
@@ -154,17 +154,17 @@ fi
# Define text colors
# Check available colors on the terminal, if any
-ncolors=`tput colors 2>/dev/null`
+ncolors=`tput colors 2>/dev/null || echo 0`
color_reset=
color_red=
color_green=
color_blue=
# If stdout exists and number of colors is eight or more, use them
-if [ -t 1 -a "$ncolors" -a "$ncolors" -ge 8 ]; then
- color_reset="\e[0m"
- color_red="\e[31m"
- color_green="\e[32m"
- color_blue="\e[34m"
+if [ -t 1 -a "$ncolors" -ge 8 ]; then
+ color_reset="\033[0m"
+ color_red="\033[31m"
+ color_green="\033[32m"
+ color_blue="\033[34m"
fi
strip_esc() {
@@ -173,8 +173,13 @@ strip_esc() {
}
prlog() { # messages
- echo -e "$@"
- [ "$LOG_FILE" ] && echo -e "$@" | strip_esc >> $LOG_FILE
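+ # echo -e is not portable across POSIX shells; emulate it with printf
+ # and handle an optional leading -n flag ourselves.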
+ newline="\n"
+ if [ "$1" = "-n" ] ; then
+ newline=
+ shift
+ fi
+ printf "$*$newline"
+ [ "$LOG_FILE" ] && printf "$*$newline" | strip_esc >> $LOG_FILE
}
catlog() { #file
cat $1
@@ -313,6 +318,7 @@ run_test() { # testfile
local testlog=/proc/self/fd/1
fi
export TMPDIR=`mktemp -d /tmp/ftracetest-dir.XXXXXX`
+ export FTRACETEST_ROOT=$TOP_DIR
echo "execute$INSTANCE: "$1 > $testlog
SIG_RESULT=0
if [ $VERBOSE -eq -1 ]; then
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
new file mode 100644
index 000000000000..021c03fd885d
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
@@ -0,0 +1,19 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - test tracing error log support
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+# event tracing is currently the only ftrace facility that uses the
+# tracing error_log, hence this check
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter'
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 7b96e80e6b8a..779ec11f61bd 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -109,3 +109,15 @@ LOCALHOST=127.0.0.1
yield() {
ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1
}
+
+ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file
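+ # $1: expected error-message prefix in error_log
+ # $2: command to write, with the expected error position marked by '^'
+ # $3: tracefs file the command is written to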
+ pos=$(echo -n "${2%^*}" | wc -c) # error position
+ command=$(echo "$2" | tr -d ^)
+ echo "Test command: $command"
+ echo > error_log
+ (! echo "$command" > "$3" ) 2> /dev/null
+ grep "$1: error:" -A 3 error_log
+ N=$(tail -n 1 error_log | wc -c)
+ # The caret line in error_log is aligned under "  Command: " (11 chars),
+ # so its length is 11 + pos + "^" + "\n" = 13 + pos.
+ test $(expr 13 + $pos) -eq $N
+}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
index 492426e95e09..7650a82db3f5 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
@@ -3,7 +3,7 @@
# description: Kprobe dynamic event with function tracer
[ -f kprobe_events ] || exit_unsupported # this is configurable
-grep function available_tracers || exit_unsupported # this is configurable
+grep "function" available_tracers || exit_unsupported # this is configurable
# prepare
echo nop > current_tracer
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
new file mode 100644
index 000000000000..29faaec942c6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -0,0 +1,85 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Kprobe event parser error log check
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+[ -f error_log ] || exit_unsupported
+
+check_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'trace_kprobe' "$1" 'kprobe_events'
+}
+
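+# Each check_error line below writes a deliberately malformed definition to
+# kprobe_events; the '^' marks where the parser is expected to point in
+# error_log, and the trailing comment names the expected error.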
+if grep -q 'r\[maxactive\]' README; then
+check_error 'p^100 vfs_read' # MAXACT_NO_KPROBE
+check_error 'r^1a111 vfs_read' # BAD_MAXACT
+check_error 'r^100000 vfs_read' # MAXACT_TOO_BIG
+fi
+
+check_error 'p ^non_exist_func' # BAD_PROBE_ADDR (enoent)
+check_error 'p ^hoge-fuga' # BAD_PROBE_ADDR (bad syntax)
+check_error 'p ^hoge+1000-1000' # BAD_PROBE_ADDR (bad syntax)
+check_error 'r ^vfs_read+10' # BAD_RETPROBE
+check_error 'p:^/bar vfs_read' # NO_GROUP_NAME
+check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
+
+check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME
+check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME
+check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
+check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
+
+check_error 'p vfs_read ^$retval' # RETVAL_ON_PROBE
+check_error 'p vfs_read ^$stack10000' # BAD_STACK_NUM
+
+if grep -q '$arg<N>' README; then
+check_error 'p vfs_read ^$arg10000' # BAD_ARG_NUM
+fi
+
+check_error 'p vfs_read ^$none_var' # BAD_VAR
+
+check_error 'p vfs_read ^%none_reg' # BAD_REG_NAME
+check_error 'p vfs_read ^@12345678abcde' # BAD_MEM_ADDR
+check_error 'p vfs_read ^@+10' # FILE_ON_KPROBE
+
+check_error 'p vfs_read ^+0@0)' # DEREF_NEED_BRACE
+check_error 'p vfs_read ^+0ab1(@0)' # BAD_DEREF_OFFS
+check_error 'p vfs_read +0(+0(@0^)' # DEREF_OPEN_BRACE
+
+if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
+check_error 'p vfs_read +0(^$comm)' # COMM_CANT_DEREF
+fi
+
+check_error 'p vfs_read ^&1' # BAD_FETCH_ARG
+
+
+# We've introduced this limitation with array support
+if grep -q ' <type>\\\[<array-size>\\\]' README; then
+check_error 'p vfs_read +0(^+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(@0))))))))))))))' # TOO_MANY_OPS?
+check_error 'p vfs_read +0(@11):u8[10^' # ARRAY_NO_CLOSE
+check_error 'p vfs_read +0(@11):u8[10]^a' # BAD_ARRAY_SUFFIX
+check_error 'p vfs_read +0(@11):u8[^10a]' # BAD_ARRAY_NUM
+check_error 'p vfs_read +0(@11):u8[^256]' # ARRAY_TOO_BIG
+fi
+
+check_error 'p vfs_read @11:^unknown_type' # BAD_TYPE
+check_error 'p vfs_read $stack0:^string' # BAD_STRING
+check_error 'p vfs_read @11:^b10@a/16' # BAD_BITFIELD
+
+check_error 'p vfs_read ^arg123456789012345678901234567890=@11' # ARG_NAME_TOO_LONG
+check_error 'p vfs_read ^=@11' # NO_ARG_NAME
+check_error 'p vfs_read ^var.1=@11' # BAD_ARG_NAME
+check_error 'p vfs_read var1=@11 ^var1=@12' # USED_ARG_NAME
+check_error 'p vfs_read ^+1234567(+1234567(+1234567(+1234567(+1234567(+1234567(@1234))))))' # ARG_TOO_LONG
+check_error 'p vfs_read arg1=^' # NO_ARG_BODY
+
+# instruction boundary check is valid on x86 (at this moment)
+case $(uname -m) in
+ x86_64|i[3456]86)
+ echo 'p vfs_read' > kprobe_events
+ if grep -q FTRACE ../kprobes/list ; then
+ check_error 'p ^vfs_read+3' # BAD_INSN_BNDRY (only if function-tracer is enabled)
+ fi
+ ;;
+esac
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
new file mode 100644
index 000000000000..14229d5778a0
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
@@ -0,0 +1,23 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Uprobe event parser error log check
+
+[ -f uprobe_events ] || exit_unsupported # this is configurable
+
+[ -f error_log ] || exit_unsupported
+
+check_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'trace_uprobe' "$1" 'uprobe_events'
+}
+
+check_error 'p ^/non_exist_file:100' # FILE_NOT_FOUND
+check_error 'p ^/sys:100' # NO_REGULAR_FILE
+check_error 'p /bin/sh:^10a' # BAD_UPROBE_OFFS
+check_error 'p /bin/sh:10(^1a)' # BAD_REFCNT
+check_error 'p /bin/sh:10(10^' # REFCNT_OPEN_BRACE
+check_error 'p /bin/sh:10(10)^a' # BAD_REFCNT_SUFFIX
+
+check_error 'p /bin/sh:10 ^@+ab' # BAD_FILE_OFFS
+check_error 'p /bin/sh:10 ^@symbol' # SYM_ON_UPROBE
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/selftest/bashisms.tc b/tools/testing/selftests/ftrace/test.d/selftest/bashisms.tc
new file mode 100644
index 000000000000..1b081e910e14
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/selftest/bashisms.tc
@@ -0,0 +1,21 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Meta-selftest: Checkbashisms
+
+if [ ! -f $FTRACETEST_ROOT/ftracetest ]; then
+ echo "Hmm, we cannot find ftracetest"
+ exit_unresolved
+fi
+
+if ! which checkbashisms > /dev/null 2>&1 ; then
+ echo "No checkbashisms found. skipped."
+ exit_unresolved
+fi
+
+checkbashisms $FTRACETEST_ROOT/ftracetest
+checkbashisms $FTRACETEST_ROOT/test.d/functions
+for t in $(find $FTRACETEST_ROOT/test.d -name \*.tc); do
+ checkbashisms $t
+done
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
new file mode 100644
index 000000000000..1221240f8cf6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
@@ -0,0 +1,30 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger expected fail actions
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f snapshot ]; then
+ echo "snapshot is not supported"
+ exit_unsupported
+fi
+
+grep -q "snapshot()" README || exit_unsupported # version issue
+
+echo "Test expected snapshot action failure"
+
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+
+echo "Test expected save action failure"
+
+echo 'hist:keys=comm:onmatch(sched.sched_wakeup).save(comm,prio)' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger && exit_fail
+
+exit_xfail
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
deleted file mode 100644
index 401104344593..000000000000
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-# description: event trigger - test extended error support
-
-
-fail() { #msg
- echo $1
- exit_fail
-}
-
-if [ ! -f set_event ]; then
- echo "event tracing is not supported"
- exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
- echo "synthetic event is not supported"
- exit_unsupported
-fi
-
-echo "Test extended error support"
-echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
-! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
-if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
- fail "Failed to generate extended error in histogram"
-fi
-
-exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
index f59b2a9a1f22..77be6e1f6e7b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test field variable support
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
index 524d9ce361e2..f3eb8aacec0e 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event combined histogram trigger
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
index 4ddc546771b5..d281f056f980 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test multiple actions on hist trigger
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
new file mode 100644
index 000000000000..064a284e4e75
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger onchange action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+grep -q "onchange(var)" README || exit_unsupported # version issue
+
+echo "Test onchange action"
+
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
+ping $LOCALHOST -c 3
+nice -n 1 ping $LOCALHOST -c 3
+
+if ! grep -q "changed:" events/sched/sched_waking/hist; then
+ fail "Failed to create onchange action inter-event histogram"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
index 39fb65b0cd9f..a708f0e7858a 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmatch action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
index 81ab3939c96a..dfce6932d8be 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmatch-onmax action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
index 1180ab5f0845..0035995c2194 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger onmax action
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
new file mode 100644
index 000000000000..18fff69fc433
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger snapshot action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f snapshot ]; then
+ echo "snapshot is not supported"
+ exit_unsupported
+fi
+
+grep -q "onchange(var)" README || exit_unsupported # version issue
+
+grep -q "snapshot()" README || exit_unsupported # version issue
+
+echo "Test snapshot action"
+
+echo 1 > /sys/kernel/debug/tracing/events/sched/enable
+
+echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio):onchange($newprio).snapshot() if comm=="ping"' >> /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+
+ping $LOCALHOST -c 3
+nice -n 1 ping $LOCALHOST -c 3
+
+echo 0 > tracing_on
+
+if ! grep -q "changed:" events/sched/sched_waking/hist; then
+ fail "Failed to create onchange action inter-event histogram"
+fi
+
+if ! grep -q "comm=ping" snapshot; then
+ fail "Failed to create snapshot action inter-event histogram"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
index 41128219231a..df44b14724a4 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test synthetic event create remove
fail() { #msg
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
new file mode 100644
index 000000000000..8021d60aafec
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
@@ -0,0 +1,42 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger trace action
+
+fail() { #msg
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+ echo "synthetic event is not supported"
+ exit_unsupported
+fi
+
+grep -q "trace(<synthetic_event>" README || exit_unsupported # version issue
+
+echo "Test create synthetic event"
+
+echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
+if [ ! -d events/synthetic/wakeup_latency ]; then
+ fail "Failed to create wakeup_latency synthetic event"
+fi
+
+echo "Test create histogram for synthetic event using trace action"
+echo "Test histogram variables,simple expression support and trace action"
+
+echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
+echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_wakeup).trace(wakeup_latency,$wakeup_lat,next_pid,next_comm) if next_comm=="ping"' > events/sched/sched_switch/trigger
+echo 'hist:keys=comm,pid,lat:wakeup_lat=lat:sort=lat' > events/synthetic/wakeup_latency/trigger
+
+ping $LOCALHOST -c 5
+
+if ! grep -q "ping" events/synthetic/wakeup_latency/hist; then
+ fail "Failed to create trace action inter-event histogram"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 54cd5c414e82..1ee5518ee6b7 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2006-2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* This test excercises the futex syscall op codes needed for requeuing
* priority inheritance aware POSIX condition variables and mutexes.
@@ -395,6 +391,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test requeue functionality\n", basename(argv[0]));
ksft_print_msg(
"\tArguments: broadcast=%d locked=%d owner=%d timeout=%ldns\n",
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
index 08187a16507f..d0a4d332ea44 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* 1. Block a thread using FUTEX_WAIT
* 2. Attempt to use FUTEX_CMP_REQUEUE_PI on the futex from 1.
@@ -79,6 +75,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Detect mismatched requeue_pi operations\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index f0542a344d95..f8c43ce8fe66 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2006-2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* This test exercises the futex_wait_requeue_pi() signal handling both
* before and after the requeue. The first should be restarted by the
@@ -144,6 +140,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test signal handling during requeue_pi\n",
basename(argv[0]));
ksft_print_msg("\tArguments: <none>\n");
diff --git a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
index 6216de828093..fb4148f23fa3 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright FUJITSU LIMITED 2010
* Copyright KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Internally, Futex has two handling mode, anon and file. The private file
* mapping is special. At first it behave as file, but after write anything
@@ -98,6 +94,7 @@ int main(int argc, char **argv)
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg(
"%s: Test the futex value of private file mappings in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index bab3dfe1787f..ee55e6d389a3 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Block on a futex and wait for timeout.
*
@@ -69,6 +65,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Block on a futex and wait for timeout\n",
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
diff --git a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
index 26975322545b..ed9cd07e31c1 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright FUJITSU LIMITED 2010
* Copyright KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Wait on uninitialized heap. It shold be zero and FUTEX_WAIT should
* return immediately. This test is intent to test zero page handling in
@@ -100,6 +96,7 @@ int main(int argc, char **argv)
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test the uninitialized futex value in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index da15a63269b4..0ae390ff8164 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Test if FUTEX_WAIT op returns -EWOULDBLOCK if the futex value differs
* from the expected one.
@@ -65,6 +61,7 @@ int main(int argc, char *argv[])
}
ksft_print_header();
+ ksft_set_plan(1);
ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
basename(argv[0]));
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index 7ff002eed624..1acb6ace1680 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -1,14 +1,10 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
###############################################################################
#
# Copyright © International Business Machines Corp., 2009
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
# DESCRIPTION
# Run tests in the current directory.
#
diff --git a/tools/testing/selftests/futex/include/atomic.h b/tools/testing/selftests/futex/include/atomic.h
index f861da3e31ab..428bcd921bb5 100644
--- a/tools/testing/selftests/futex/include/atomic.h
+++ b/tools/testing/selftests/futex/include/atomic.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* GCC atomic builtin wrappers
* http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html
diff --git a/tools/testing/selftests/futex/include/futextest.h b/tools/testing/selftests/futex/include/futextest.h
index b98c3aba7102..ddbcfc9b7bac 100644
--- a/tools/testing/selftests/futex/include/futextest.h
+++ b/tools/testing/selftests/futex/include/futextest.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Glibc independent futex library for testing kernel functionality.
*
diff --git a/tools/testing/selftests/futex/include/logging.h b/tools/testing/selftests/futex/include/logging.h
index 01989644e50a..874c69ce5cce 100644
--- a/tools/testing/selftests/futex/include/logging.h
+++ b/tools/testing/selftests/futex/include/logging.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
*
* Copyright © International Business Machines Corp., 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* DESCRIPTION
* Glibc independent futex library for testing kernel functionality.
*
diff --git a/tools/testing/selftests/futex/run.sh b/tools/testing/selftests/futex/run.sh
index 88bcb1767362..5e76ea18f9fa 100755
--- a/tools/testing/selftests/futex/run.sh
+++ b/tools/testing/selftests/futex/run.sh
@@ -1,14 +1,10 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
###############################################################################
#
# Copyright © International Business Machines Corp., 2009
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
# DESCRIPTION
# Run all tests under the functional, performance, and stress directories.
# Format and summarize the results.
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index aaa1e9f083c3..73ead8828d3a 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -1,18 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* GPIO chardev test helper
*
* Copyright (C) 2016 Bamvor Jian Zhang
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
-#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/ia64/aliasing-test.c b/tools/testing/selftests/ia64/aliasing-test.c
index 62a190d45f38..1ad6896f10f7 100644
--- a/tools/testing/selftests/ia64/aliasing-test.c
+++ b/tools/testing/selftests/ia64/aliasing-test.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Exercise /dev/mem mmap cases that have been troublesome in the past
*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <stdlib.h>
diff --git a/tools/testing/selftests/ima/config b/tools/testing/selftests/ima/config
deleted file mode 100644
index 6bc86d4d9bb4..000000000000
--- a/tools/testing/selftests/ima/config
+++ /dev/null
@@ -1,4 +0,0 @@
-CONFIG_IMA_APPRAISE
-CONFIG_IMA_ARCH_POLICY
-CONFIG_SECURITYFS
-CONFIG_KEXEC_VERIFY_SIG
diff --git a/tools/testing/selftests/ima/test_kexec_load.sh b/tools/testing/selftests/ima/test_kexec_load.sh
deleted file mode 100755
index 1c10093fb526..000000000000
--- a/tools/testing/selftests/ima/test_kexec_load.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0+
-# Loading a kernel image via the kexec_load syscall should fail
-# when the kerne is CONFIG_KEXEC_VERIFY_SIG enabled and the system
-# is booted in secureboot mode.
-
-TEST="$0"
-EFIVARFS="/sys/firmware/efi/efivars"
-rc=0
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-# kexec requires root privileges
-if [ $UID != 0 ]; then
- echo "$TEST: must be run as root" >&2
- exit $ksft_skip
-fi
-
-# Make sure that efivars is mounted in the normal location
-if ! grep -q "^\S\+ $EFIVARFS efivarfs" /proc/mounts; then
- echo "$TEST: efivars is not mounted on $EFIVARFS" >&2
- exit $ksft_skip
-fi
-
-# Get secureboot mode
-file="$EFIVARFS/SecureBoot-*"
-if [ ! -e $file ]; then
- echo "$TEST: unknown secureboot mode" >&2
- exit $ksft_skip
-fi
-secureboot=`hexdump $file | awk '{print substr($4,length($4),1)}'`
-
-# kexec_load should fail in secure boot mode
-KERNEL_IMAGE="/boot/vmlinuz-`uname -r`"
-kexec -l $KERNEL_IMAGE &>> /dev/null
-if [ $? == 0 ]; then
- kexec -u
- if [ "$secureboot" == "1" ]; then
- echo "$TEST: kexec_load succeeded [FAIL]"
- rc=1
- else
- echo "$TEST: kexec_load succeeded [PASS]"
- fi
-else
- if [ "$secureboot" == "1" ]; then
- echo "$TEST: kexec_load failed [PASS]"
- else
- echo "$TEST: kexec_load failed [FAIL]"
- rc=1
- fi
-fi
-
-exit $rc
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index dac927e82336..4c156aeab6b8 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
-#include <linux/msg.h>
+#include <sys/msg.h>
#include <fcntl.h>
#include "../kselftest.h"
@@ -73,7 +74,7 @@ int restore_queue(struct msgque_data *msgque)
return 0;
destroy:
- if (msgctl(id, IPC_RMID, 0))
+ if (msgctl(id, IPC_RMID, NULL))
printf("Failed to destroy queue: %d\n", -errno);
return ret;
}
@@ -120,7 +121,7 @@ int check_and_destroy_queue(struct msgque_data *msgque)
ret = 0;
err:
- if (msgctl(msgque->msq_id, IPC_RMID, 0)) {
+ if (msgctl(msgque->msq_id, IPC_RMID, NULL)) {
printf("Failed to destroy queue: %d\n", -errno);
return -errno;
}
@@ -129,7 +130,7 @@ err:
int dump_queue(struct msgque_data *msgque)
{
- struct msqid64_ds ds;
+ struct msqid_ds ds;
int kern_id;
int i, ret;
@@ -245,7 +246,7 @@ int main(int argc, char **argv)
return ksft_exit_pass();
err_destroy:
- if (msgctl(msgque.msq_id, IPC_RMID, 0)) {
+ if (msgctl(msgque.msq_id, IPC_RMID, NULL)) {
printf("Failed to destroy queue: %d\n", -errno);
return ksft_exit_fail();
}
diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
index 858c19caf224..e700e09e3682 100644
--- a/tools/testing/selftests/ir/ir_loopback.c
+++ b/tools/testing/selftests/ir/ir_loopback.c
@@ -27,6 +27,8 @@
#define TEST_SCANCODES 10
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define SYSFS_PATH_MAX 256
+#define DNAME_PATH_MAX 256
static const struct {
enum rc_proto proto;
@@ -51,12 +53,16 @@ static const struct {
{ RC_PROTO_RC6_6A_32, "rc-6-6a-32", 0xffffffff, "rc-6" },
{ RC_PROTO_RC6_MCE, "rc-6-mce", 0x00007fff, "rc-6" },
{ RC_PROTO_SHARP, "sharp", 0x1fff, "sharp" },
+ { RC_PROTO_IMON, "imon", 0x7fffffff, "imon" },
+ { RC_PROTO_RCMM12, "rcmm-12", 0x00000fff, "rcmm" },
+ { RC_PROTO_RCMM24, "rcmm-24", 0x00ffffff, "rcmm" },
+ { RC_PROTO_RCMM32, "rcmm-32", 0xffffffff, "rcmm" },
};
int lirc_open(const char *rc)
{
struct dirent *dent;
- char buf[100];
+ char buf[SYSFS_PATH_MAX + DNAME_PATH_MAX];
DIR *d;
int fd;
@@ -74,7 +80,7 @@ int lirc_open(const char *rc)
}
if (!dent)
- ksft_exit_fail_msg("cannot find lirc device for %s\n", rc);
+ ksft_exit_skip("cannot find lirc device for %s\n", rc);
closedir(d);
@@ -139,6 +145,11 @@ int main(int argc, char **argv)
(((scancode >> 8) ^ ~scancode) & 0xff) == 0)
continue;
+ if (rc_proto == RC_PROTO_RCMM32 &&
+ (scancode & 0x000c0000) != 0x000c0000 &&
+ scancode & 0x00008000)
+ continue;
+
struct lirc_scancode lsc = {
.rc_proto = rc_proto,
.scancode = scancode
diff --git a/tools/testing/selftests/ir/ir_loopback.sh b/tools/testing/selftests/ir/ir_loopback.sh
index 0a0b8dfa39be..b90dc9939f45 100755
--- a/tools/testing/selftests/ir/ir_loopback.sh
+++ b/tools/testing/selftests/ir/ir_loopback.sh
@@ -4,6 +4,11 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+if [ $UID != 0 ]; then
+ echo "Please run ir_loopback test as root [SKIP]"
+ exit $ksft_skip
+fi
+
if ! /sbin/modprobe -q -n rc-loopback; then
echo "ir_loopback: module rc-loopback is not found [SKIP]"
exit $ksft_skip
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
index 47aa9887f9d4..b4d39f6b5124 100644
--- a/tools/testing/selftests/kcmp/Makefile
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -I../../../../usr/include/
TEST_GEN_PROGS := kcmp_test
diff --git a/tools/testing/selftests/ima/Makefile b/tools/testing/selftests/kexec/Makefile
index 0b3adf5444b6..aa91d2063249 100644
--- a/tools/testing/selftests/ima/Makefile
+++ b/tools/testing/selftests/kexec/Makefile
@@ -1,10 +1,12 @@
-# Makefile for kexec_load
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for kexec tests
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
-TEST_PROGS := test_kexec_load.sh
+TEST_PROGS := test_kexec_load.sh test_kexec_file_load.sh
+TEST_FILES := kexec_common_lib.sh
include ../lib.mk
diff --git a/tools/testing/selftests/kexec/config b/tools/testing/selftests/kexec/config
new file mode 100644
index 000000000000..8962e862b2b8
--- /dev/null
+++ b/tools/testing/selftests/kexec/config
@@ -0,0 +1,3 @@
+CONFIG_IMA_APPRAISE=y
+CONFIG_IMA_ARCH_POLICY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/kexec/kexec_common_lib.sh b/tools/testing/selftests/kexec/kexec_common_lib.sh
new file mode 100755
index 000000000000..43017cfe88f7
--- /dev/null
+++ b/tools/testing/selftests/kexec/kexec_common_lib.sh
@@ -0,0 +1,220 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Kselftest framework defines: ksft_pass=0, ksft_fail=1, ksft_skip=4
+
+VERBOSE="${VERBOSE:-1}"
+IKCONFIG="/tmp/config-`uname -r`"
+KERNEL_IMAGE="/boot/vmlinuz-`uname -r`"
+SECURITYFS=$(grep "securityfs" /proc/mounts | awk '{print $2}')
+
+log_info()
+{
+ [ $VERBOSE -ne 0 ] && echo "[INFO] $1"
+}
+
+# The kselftest framework uses exit code 0 for PASS.
+log_pass()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1 [PASS]"
+ exit 0
+}
+
+# The kselftest framework uses exit code 1 for FAIL.
+log_fail()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1 [FAIL]"
+ exit 1
+}
+
+# The kselftest framework uses exit code 4 for SKIP.
+log_skip()
+{
+ [ $VERBOSE -ne 0 ] && echo "$1"
+ exit 4
+}
+
+# Check efivar SecureBoot-$(the UUID) and SetupMode-$(the UUID).
+# (Based on kdump-lib.sh)
+get_efivarfs_secureboot_mode()
+{
+ local efivarfs="/sys/firmware/efi/efivars"
+ local secure_boot_file=""
+ local setup_mode_file=""
+ local secureboot_mode=0
+ local setup_mode=0
+
+ # Make sure that efivarfs is mounted in the normal location
+ if ! grep -q "^\S\+ $efivarfs efivarfs" /proc/mounts; then
+ log_info "efivars is not mounted on $efivarfs"
+ return 0;
+ fi
+ secure_boot_file=$(find "$efivarfs" -name SecureBoot-* 2>/dev/null)
+ setup_mode_file=$(find "$efivarfs" -name SetupMode-* 2>/dev/null)
+ if [ -f "$secure_boot_file" ] && [ -f "$setup_mode_file" ]; then
+ secureboot_mode=$(hexdump -v -e '/1 "%d\ "' \
+ "$secure_boot_file"|cut -d' ' -f 5)
+ setup_mode=$(hexdump -v -e '/1 "%d\ "' \
+ "$setup_mode_file"|cut -d' ' -f 5)
+
+ if [ $secureboot_mode -eq 1 ] && [ $setup_mode -eq 0 ]; then
+ log_info "secure boot mode enabled (CONFIG_EFIVAR_FS)"
+ return 1;
+ fi
+ fi
+ return 0;
+}
+
+get_efi_var_secureboot_mode()
+{
+ local efi_vars="/sys/firmware/efi/vars"
+ local secure_boot_file
+ local setup_mode_file
+ local secureboot_mode
+ local setup_mode
+
+ if [ ! -d "$efi_vars" ]; then
+ log_skip "efi_vars is not enabled"
+ fi
+ secure_boot_file=$(find "$efi_vars" -name SecureBoot-* 2>/dev/null)
+ setup_mode_file=$(find "$efi_vars" -name SetupMode-* 2>/dev/null)
+ if [ -f "$secure_boot_file/data" ] && \
+ [ -f "$setup_mode_file/data" ]; then
+ secureboot_mode=`od -An -t u1 "$secure_boot_file/data"`
+ setup_mode=`od -An -t u1 "$setup_mode_file/data"`
+
+ if [ $secureboot_mode -eq 1 ] && [ $setup_mode -eq 0 ]; then
+ log_info "secure boot mode enabled (CONFIG_EFI_VARS)"
+ return 1;
+ fi
+ fi
+ return 0;
+}
+
+# Check efivar SecureBoot-$(the UUID) and SetupMode-$(the UUID).
+# The secure boot mode can be accessed either as the last integer
+# of "od -An -t u1 /sys/firmware/efi/efivars/SecureBoot-*" or from
+# "od -An -t u1 /sys/firmware/efi/vars/SecureBoot-*/data". The efi
+# SetupMode can be similarly accessed.
+# Return 1 for SecureBoot mode enabled and SetupMode mode disabled.
+get_secureboot_mode()
+{
+ local secureboot_mode=0
+
+ get_efivarfs_secureboot_mode
+ secureboot_mode=$?
+
+ # fallback to using the efi_var files
+ if [ $secureboot_mode -eq 0 ]; then
+ get_efi_var_secureboot_mode
+ secureboot_mode=$?
+ fi
+
+ if [ $secureboot_mode -eq 0 ]; then
+ log_info "secure boot mode not enabled"
+ fi
+ return $secureboot_mode;
+}
+
+require_root_privileges()
+{
+ if [ $(id -ru) -ne 0 ]; then
+ log_skip "requires root privileges"
+ fi
+}
+
+# Look for config option in Kconfig file.
+# Return 1 for found and 0 for not found.
+kconfig_enabled()
+{
+ local config="$1"
+ local msg="$2"
+
+ grep -E -q $config $IKCONFIG
+ if [ $? -eq 0 ]; then
+ log_info "$msg"
+ return 1
+ fi
+ return 0
+}
+
+# Attempt to get the kernel config first via proc, and then by
+# extracting it from the kernel image or the configs.ko using
+# scripts/extract-ikconfig.
+# Return 1 for found.
+get_kconfig()
+{
+ local proc_config="/proc/config.gz"
+ local module_dir="/lib/modules/`uname -r`"
+ local configs_module="$module_dir/kernel/kernel/configs.ko"
+
+ if [ ! -f $proc_config ]; then
+ modprobe configs > /dev/null 2>&1
+ fi
+ if [ -f $proc_config ]; then
+ cat $proc_config | gunzip > $IKCONFIG 2>/dev/null
+ if [ $? -eq 0 ]; then
+ return 1
+ fi
+ fi
+
+ local extract_ikconfig="$module_dir/source/scripts/extract-ikconfig"
+ if [ ! -f $extract_ikconfig ]; then
+ log_skip "extract-ikconfig not found"
+ fi
+
+ $extract_ikconfig $KERNEL_IMAGE > $IKCONFIG 2>/dev/null
+ if [ $? -eq 1 ]; then
+ if [ ! -f $configs_module ]; then
+ log_skip "CONFIG_IKCONFIG not enabled"
+ fi
+ $extract_ikconfig $configs_module > $IKCONFIG
+ if [ $? -eq 1 ]; then
+ log_skip "CONFIG_IKCONFIG not enabled"
+ fi
+ fi
+ return 1
+}
+
+# Make sure that securityfs is mounted
+mount_securityfs()
+{
+ if [ -z "$SECURITYFS" ]; then
+ SECURITYFS=/sys/kernel/security
+ mount -t securityfs security $SECURITYFS
+ fi
+
+ if [ ! -d "$SECURITYFS" ]; then
+ log_fail "$SECURITYFS: securityfs is not mounted"
+ fi
+}
+
+# The policy rule format is an "action" followed by key-value pairs. This
+# function supports up to two key-value pairs, in any order.
+# For example: action func=<keyword> [appraise_type=<type>]
+# Return 1 for found and 0 for not found.
+check_ima_policy()
+{
+ local action="$1"
+ local keypair1="$2"
+ local keypair2="$3"
+ local ret=0
+
+ mount_securityfs
+
+ local ima_policy=$SECURITYFS/ima/policy
+ if [ ! -e $ima_policy ]; then
+ log_fail "$ima_policy not found"
+ fi
+
+ if [ -n "$keypair2" ]; then
+ grep -e "^$action.*$keypair1" "$ima_policy" | \
+ grep -q -e "$keypair2"
+ else
+ grep -q -e "^$action.*$keypair1" "$ima_policy"
+ fi
+
+ # invert "grep -q" result, returning 1 for found.
+ [ $? -eq 0 ] && ret=1
+ return $ret
+}
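
For reference, get_efivarfs_secureboot_mode() above relies on the efivarfs layout in which the first four bytes of a variable file are its attribute word, so the SecureBoot value is the fifth byte (exactly what the hexdump|cut pipeline extracts). A rough C equivalent; the path is illustrative, the real file name ends in the EFI global-variable GUID:

#include <fcntl.h>
#include <unistd.h>

static int efivarfs_secureboot_enabled(const char *path)
{
	/* e.g. path = "/sys/firmware/efi/efivars/SecureBoot-<guid>" */
	unsigned char buf[5];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 0;
	if (read(fd, buf, sizeof(buf)) != sizeof(buf))
		buf[4] = 0;
	close(fd);
	return buf[4] == 1;	/* byte 4 is the first data byte */
}
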
diff --git a/tools/testing/selftests/kexec/test_kexec_file_load.sh b/tools/testing/selftests/kexec/test_kexec_file_load.sh
new file mode 100755
index 000000000000..fa7c24e8eefb
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_file_load.sh
@@ -0,0 +1,208 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Loading a kernel image via the kexec_file_load syscall can verify either
+# the IMA signature stored in the security.ima xattr or the PE signature,
+# both signatures, or neither, depending on the IMA policy.
+#
+# To determine whether the kernel image is signed, this test depends
+# on pesign and getfattr. This test also requires the kernel to be
+# built with CONFIG_IKCONFIG enabled and either CONFIG_IKCONFIG_PROC
+# enabled or access to the extract-ikconfig script.
+
+TEST="KEXEC_FILE_LOAD"
+. ./kexec_common_lib.sh
+
+trap "{ rm -f $IKCONFIG ; }" EXIT
+
+# Some of the IMA builtin policies may require the kexec kernel image to
+# be signed, but these policy rules may be replaced with a custom
+# policy. Only CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS persists after
+# loading a custom policy. Check if it is enabled, before reading the
+# IMA runtime sysfs policy file.
+# Return 1 for IMA signature required and 0 for not required.
+is_ima_sig_required()
+{
+ local ret=0
+
+ kconfig_enabled "CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS=y" \
+ "IMA kernel image signature required"
+ if [ $? -eq 1 ]; then
+ log_info "IMA signature required"
+ return 1
+ fi
+
+ # The architecture specific or a custom policy may require the
+ # kexec kernel image be signed. Policy rules are walked
+ # sequentially. As a result, a policy rule may be defined, but
+ # might not necessarily be used. This test assumes if a policy
+ # rule is specified, that is the intent.
+ if [ $ima_read_policy -eq 1 ]; then
+ check_ima_policy "appraise" "func=KEXEC_KERNEL_CHECK" \
+ "appraise_type=imasig"
+ ret=$?
+ [ $ret -eq 1 ] && log_info "IMA signature required";
+ fi
+ return $ret
+}
+
+# kexec_file_load_test() is complicated enough; simply require pesign.
+# Return 1 for PE signature found and 0 for not found.
+check_for_pesig()
+{
+ which pesign > /dev/null 2>&1 || log_skip "pesign not found"
+
+ pesign -i $KERNEL_IMAGE --show-signature | grep -q "No signatures"
+ local ret=$?
+ if [ $ret -eq 1 ]; then
+ log_info "kexec kernel image PE signed"
+ else
+ log_info "kexec kernel image not PE signed"
+ fi
+ return $ret
+}
+
+# kexec_file_load_test() is complicated enough; simply require getfattr.
+# Return 1 for IMA signature found and 0 for not found.
+check_for_imasig()
+{
+ local ret=0
+
+ which getfattr > /dev/null 2>&1
+ if [ $? -eq 1 ]; then
+ log_skip "getfattr not found"
+ fi
+
+ line=$(getfattr -n security.ima -e hex --absolute-names $KERNEL_IMAGE 2>&1)
+ echo $line | grep -q "security.ima=0x03"
+ if [ $? -eq 0 ]; then
+ ret=1
+ log_info "kexec kernel image IMA signed"
+ else
+ log_info "kexec kernel image not IMA signed"
+ fi
+ return $ret
+}
+
+kexec_file_load_test()
+{
+ local succeed_msg="kexec_file_load succeeded"
+ local failed_msg="kexec_file_load failed"
+ local key_msg="try enabling the CONFIG_INTEGRITY_PLATFORM_KEYRING"
+
+ line=$(kexec --load --kexec-file-syscall $KERNEL_IMAGE 2>&1)
+
+ if [ $? -eq 0 ]; then
+ kexec --unload --kexec-file-syscall
+
+ # In secureboot mode with an architecture specific
+ # policy, make sure either an IMA or PE signature exists.
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ] && \
+ [ $ima_signed -eq 0 ] && [ $pe_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing sig)"
+ fi
+
+ if [ $kexec_sig_required -eq 1 -o $pe_sig_required -eq 1 ] \
+ && [ $pe_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing PE sig)"
+ fi
+
+ if [ $ima_sig_required -eq 1 ] && [ $ima_signed -eq 0 ]; then
+ log_fail "$succeed_msg (missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_signed -eq 0 ] \
+ && [ $ima_read_policy -eq 0 ]; then
+ log_fail "$succeed_msg (possibly missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 0 ]; then
+ log_info "No signature verification required"
+ elif [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_signed -eq 0 ] \
+ && [ $ima_read_policy -eq 1 ]; then
+ log_info "No signature verification required"
+ fi
+
+ log_pass "$succeed_msg"
+ fi
+
+ # Check the reason for the kexec_file_load failure
+ echo $line | grep -q "Required key not available"
+ if [ $? -eq 0 ]; then
+ if [ $platform_keyring -eq 0 ]; then
+ log_pass "$failed_msg (-ENOKEY), $key_msg"
+ else
+ log_pass "$failed_msg (-ENOKEY)"
+ fi
+ fi
+
+ if [ $kexec_sig_required -eq 1 -o $pe_sig_required -eq 1 ] \
+ && [ $pe_signed -eq 0 ]; then
+ log_pass "$failed_msg (missing PE sig)"
+ fi
+
+ if [ $ima_sig_required -eq 1 ] && [ $ima_signed -eq 0 ]; then
+ log_pass "$failed_msg (missing IMA sig)"
+ fi
+
+ if [ $pe_sig_required -eq 0 ] && [ $ima_appraise -eq 1 ] \
+ && [ $ima_sig_required -eq 0 ] && [ $ima_read_policy -eq 0 ] \
+ && [ $ima_signed -eq 0 ]; then
+ log_pass "$failed_msg (possibly missing IMA sig)"
+ fi
+
+ log_pass "$failed_msg"
+ return 0
+}
+
+# kexec requires root privileges
+require_root_privileges
+
+# get the kernel config
+get_kconfig
+
+kconfig_enabled "CONFIG_KEXEC_FILE=y" "kexec_file_load is enabled"
+if [ $? -eq 0 ]; then
+ log_skip "kexec_file_load is not enabled"
+fi
+
+# Determine which kernel config options are enabled
+kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled"
+ima_appraise=$?
+
+kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \
+ "architecture specific policy enabled"
+arch_policy=$?
+
+kconfig_enabled "CONFIG_INTEGRITY_PLATFORM_KEYRING=y" \
+ "platform keyring enabled"
+platform_keyring=$?
+
+kconfig_enabled "CONFIG_IMA_READ_POLICY=y" "reading IMA policy permitted"
+ima_read_policy=$?
+
+kconfig_enabled "CONFIG_KEXEC_SIG_FORCE=y" \
+ "kexec signed kernel image required"
+kexec_sig_required=$?
+
+kconfig_enabled "CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y" \
+ "PE signed kernel image required"
+pe_sig_required=$?
+
+is_ima_sig_required
+ima_sig_required=$?
+
+get_secureboot_mode
+secureboot=$?
+
+# Are there pe and ima signatures
+check_for_pesig
+pe_signed=$?
+
+check_for_imasig
+ima_signed=$?
+
+# Test loading the kernel image via kexec_file_load syscall
+kexec_file_load_test
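
check_for_imasig() above shells out to getfattr and greps for security.ima=0x03; a rough C sketch of the same check via getxattr(2), assuming the convention that a leading 0x03 byte marks a digital-signature xattr (as the script's grep implies):

#include <sys/types.h>
#include <sys/xattr.h>

static int has_ima_sig(const char *path)
{
	unsigned char buf[512];
	ssize_t len = getxattr(path, "security.ima", buf, sizeof(buf));

	/* Assumption: 0x03 is the signature type byte the script greps for. */
	return len > 0 && buf[0] == 0x03;
}
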
diff --git a/tools/testing/selftests/kexec/test_kexec_load.sh b/tools/testing/selftests/kexec/test_kexec_load.sh
new file mode 100755
index 000000000000..49c6aa929137
--- /dev/null
+++ b/tools/testing/selftests/kexec/test_kexec_load.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Prevent loading a kernel image via the kexec_load syscall when
+# signatures are required. (Dependent on CONFIG_IMA_ARCH_POLICY.)
+
+TEST="$0"
+. ./kexec_common_lib.sh
+
+# kexec requires root privileges
+require_root_privileges
+
+# get the kernel config
+get_kconfig
+
+kconfig_enabled "CONFIG_KEXEC=y" "kexec_load is enabled"
+if [ $? -eq 0 ]; then
+ log_skip "kexec_load is not enabled"
+fi
+
+kconfig_enabled "CONFIG_IMA_APPRAISE=y" "IMA enabled"
+ima_appraise=$?
+
+kconfig_enabled "CONFIG_IMA_ARCH_POLICY=y" \
+ "IMA architecture specific policy enabled"
+arch_policy=$?
+
+get_secureboot_mode
+secureboot=$?
+
+# kexec_load should fail in secure boot mode with CONFIG_IMA_ARCH_POLICY enabled
+kexec --load $KERNEL_IMAGE > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ kexec --unload
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ]; then
+ log_fail "kexec_load succeeded"
+ elif [ $ima_appraise -eq 0 -o $arch_policy -eq 0 ]; then
+ log_info "Either IMA or the IMA arch policy is not enabled"
+ fi
+ log_pass "kexec_load succeeded"
+else
+ if [ $secureboot -eq 1 ] && [ $arch_policy -eq 1 ] ; then
+ log_pass "kexec_load failed"
+ else
+ log_fail "kexec_load failed"
+ fi
+fi
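
The script drives kexec_load through the kexec(8) tool; the same permission check can also be probed at the syscall level. A hedged sketch, assuming that a zero-segment load is treated as a no-op unload when the syscall is permitted and is rejected (typically EPERM) when the secure-boot/IMA policy forbids kexec_load:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* nr_segments == 0: nothing is actually loaded. */
	long ret = syscall(SYS_kexec_load, 0UL, 0UL, NULL, 0UL);

	if (ret == 0)
		printf("kexec_load permitted\n");
	else
		printf("kexec_load rejected: errno=%d\n", errno);
	return 0;
}

Like the script, this needs CAP_SYS_BOOT, so it must run as root.
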
diff --git a/tools/testing/selftests/kmod/Makefile b/tools/testing/selftests/kmod/Makefile
index fa2ccc5fb3de..5b3e746a0bee 100644
--- a/tools/testing/selftests/kmod/Makefile
+++ b/tools/testing/selftests/kmod/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for kmod loading selftests
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index a3edb2c8e43d..ec15c4f6af55 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -13,6 +13,7 @@
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
+#include <stdio.h>
/* define kselftest exit codes */
#define KSFT_PASS 0
@@ -32,6 +33,7 @@ struct ksft_count {
};
static struct ksft_count ksft_cnt;
+static unsigned int ksft_plan;
static inline int ksft_test_num(void)
{
@@ -60,13 +62,21 @@ static inline void ksft_print_header(void)
printf("TAP version 13\n");
}
+static inline void ksft_set_plan(unsigned int plan)
+{
+ ksft_plan = plan;
+ printf("1..%d\n", ksft_plan);
+}
+
static inline void ksft_print_cnts(void)
{
- printf("Pass %d Fail %d Xfail %d Xpass %d Skip %d Error %d\n",
+ if (ksft_plan != ksft_test_num())
+ printf("# Planned tests != run tests (%u != %u)\n",
+ ksft_plan, ksft_test_num());
+ printf("# Pass %d Fail %d Xfail %d Xpass %d Skip %d Error %d\n",
ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
ksft_cnt.ksft_xskip, ksft_cnt.ksft_error);
- printf("1..%d\n", ksft_test_num());
}
static inline void ksft_print_msg(const char *msg, ...)
@@ -110,7 +120,7 @@ static inline void ksft_test_result_skip(const char *msg, ...)
ksft_cnt.ksft_xskip++;
va_start(args, msg);
- printf("ok %d # skip ", ksft_test_num());
+ printf("not ok %d # SKIP ", ksft_test_num());
vprintf(msg, args);
va_end(args);
}
@@ -171,7 +181,7 @@ static inline int ksft_exit_skip(const char *msg, ...)
va_list args;
va_start(args, msg);
- printf("1..%d # Skipped: ", ksft_test_num());
+ printf("not ok %d # SKIP ", 1 + ksft_test_num());
vprintf(msg, args);
va_end(args);
} else {
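
With ksft_set_plan() added and the counter summary demoted to a TAP diagnostic line, a test is now expected to announce its plan up front. A minimal usage sketch (the include path depends on where the test lives relative to kselftest.h):

#include "kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);		/* prints "1..2" */

	ksft_test_result_pass("first case\n");
	ksft_test_result_skip("second case needs root\n");

	return ksft_exit_pass();	/* prints the "# Pass ..." summary */
}
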
diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl
new file mode 100755
index 000000000000..ec7e48118183
--- /dev/null
+++ b/tools/testing/selftests/kselftest/prefix.pl
@@ -0,0 +1,23 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+# Prefix all lines with "# ", unbuffered. Command being piped in may need
+# to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd".
+use strict;
+
+binmode STDIN;
+binmode STDOUT;
+
+STDOUT->autoflush(1);
+
+my $needed = 1;
+while (1) {
+ my $char;
+ my $bytes = sysread(STDIN, $char, 1);
+ exit 0 if ($bytes == 0);
+ if ($needed) {
+ print "# ";
+ $needed = 0;
+ }
+ print $char;
+ $needed = 1 if ($char eq "\n");
+}
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
new file mode 100644
index 000000000000..00c9020bdda8
--- /dev/null
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -0,0 +1,76 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Runs a set of tests in a given subdirectory.
+export skip_rc=4
+export logfile=/dev/stdout
+export per_test_logging=
+
+# There isn't a shell-agnostic way to find the path of a sourced file,
+# so we must rely on BASE_DIR being set to find other tools.
+if [ -z "$BASE_DIR" ]; then
+ echo "Error: BASE_DIR must be set before sourcing." >&2
+ exit 1
+fi
+
+# If Perl is unavailable, we must fall back to line-at-a-time prefixing
+# with sed instead of unbuffered output.
+tap_prefix()
+{
+ if [ ! -x /usr/bin/perl ]; then
+ sed -e 's/^/# /'
+ else
+ "$BASE_DIR"/kselftest/prefix.pl
+ fi
+}
+
+run_one()
+{
+ DIR="$1"
+ TEST="$2"
+ NUM="$3"
+
+ BASENAME_TEST=$(basename $TEST)
+
+ TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
+ echo "# $TEST_HDR_MSG"
+ if [ ! -x "$TEST" ]; then
+ echo -n "# Warning: file $TEST is "
+ if [ ! -e "$TEST" ]; then
+ echo "missing!"
+ else
+ echo "not executable, correct this."
+ fi
+ echo "not ok $test_num $TEST_HDR_MSG"
+ else
+ cd `dirname $TEST` > /dev/null
+ (((((./$BASENAME_TEST 2>&1; echo $? >&3) |
+ tap_prefix >&4) 3>&1) |
+ (read xs; exit $xs)) 4>>"$logfile" &&
+ echo "ok $test_num $TEST_HDR_MSG") ||
+ (if [ $? -eq $skip_rc ]; then \
+ echo "not ok $test_num $TEST_HDR_MSG # SKIP"
+ else
+ echo "not ok $test_num $TEST_HDR_MSG"
+ fi)
+ cd - >/dev/null
+ fi
+}
+
+run_many()
+{
+ echo "TAP version 13"
+ DIR=$(basename "$PWD")
+ test_num=0
+ total=$(echo "$@" | wc -w)
+ echo "1..$total"
+ for TEST in "$@"; do
+ BASENAME_TEST=$(basename $TEST)
+ test_num=$(( test_num + 1 ))
+ if [ -n "$per_test_logging" ]; then
+ logfile="/tmp/$BASENAME_TEST"
+ cat /dev/null > "$logfile"
+ fi
+ run_one "$DIR" "$TEST" "$test_num"
+ done
+}
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 76d654ef3234..5336b26506ab 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by the GPLv2 license.
*
* kselftest_harness.h: simple C unit test helper.
*
@@ -62,6 +62,7 @@
#include <sys/wait.h>
#include <unistd.h>
+#define TEST_TIMEOUT_DEFAULT 30
/* Utilities exposed to the test definitions */
#ifndef TH_LOG_STREAM
@@ -168,8 +169,9 @@
#define __TEST_IMPL(test_name, _signal) \
static void test_name(struct __test_metadata *_metadata); \
static struct __test_metadata _##test_name##_object = \
- { name: "global." #test_name, \
- fn: &test_name, termsig: _signal }; \
+ { .name = "global." #test_name, \
+ .fn = &test_name, .termsig = _signal, \
+ .timeout = TEST_TIMEOUT_DEFAULT, }; \
static void __attribute__((constructor)) _register_##test_name(void) \
{ \
__register_test(&_##test_name##_object); \
@@ -280,12 +282,15 @@
*/
/* TODO(wad) register fixtures on dedicated test lists. */
#define TEST_F(fixture_name, test_name) \
- __TEST_F_IMPL(fixture_name, test_name, -1)
+ __TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)
#define TEST_F_SIGNAL(fixture_name, test_name, signal) \
- __TEST_F_IMPL(fixture_name, test_name, signal)
+ __TEST_F_IMPL(fixture_name, test_name, signal, TEST_TIMEOUT_DEFAULT)
-#define __TEST_F_IMPL(fixture_name, test_name, signal) \
+#define TEST_F_TIMEOUT(fixture_name, test_name, timeout) \
+ __TEST_F_IMPL(fixture_name, test_name, -1, timeout)
+
+#define __TEST_F_IMPL(fixture_name, test_name, signal, tmout) \
static void fixture_name##_##test_name( \
struct __test_metadata *_metadata, \
FIXTURE_DATA(fixture_name) *self); \
@@ -304,9 +309,10 @@
} \
static struct __test_metadata \
_##fixture_name##_##test_name##_object = { \
- name: #fixture_name "." #test_name, \
- fn: &wrapper_##fixture_name##_##test_name, \
- termsig: signal, \
+ .name = #fixture_name "." #test_name, \
+ .fn = &wrapper_##fixture_name##_##test_name, \
+ .termsig = signal, \
+ .timeout = tmout, \
}; \
static void __attribute__((constructor)) \
_register_##fixture_name##_##test_name(void) \
@@ -632,6 +638,7 @@ struct __test_metadata {
int termsig;
int passed;
int trigger; /* extra handler after the evaluation */
+ int timeout;
__u8 step;
bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
struct __test_metadata *prev, *next;
@@ -696,6 +703,7 @@ void __run_test(struct __test_metadata *t)
t->passed = 1;
t->trigger = 0;
printf("[ RUN ] %s\n", t->name);
+ alarm(t->timeout);
child_pid = fork();
if (child_pid < 0) {
printf("ERROR SPAWNING TEST CHILD\n");
@@ -744,6 +752,7 @@ void __run_test(struct __test_metadata *t)
}
}
printf("[ %4s ] %s\n", (t->passed ? "OK" : "FAIL"), t->name);
+ alarm(0);
}
static int test_harness_run(int __attribute__((unused)) argc,
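
The new timeout field defaults to TEST_TIMEOUT_DEFAULT (30 seconds) and can be raised per test with the TEST_F_TIMEOUT() variant introduced above. A minimal sketch using the harness macros (fixture and test names are illustrative; include path depends on the test's directory):

#include "kselftest_harness.h"

FIXTURE(scratch) {
	int fd;
};

FIXTURE_SETUP(scratch)
{
	self->fd = -1;
}

FIXTURE_TEARDOWN(scratch)
{
}

/* Allow this (hypothetical) slow case 60 seconds instead of 30. */
TEST_F_TIMEOUT(scratch, slow_case, 60)
{
	ASSERT_EQ(-1, self->fd);
}

TEST_HARNESS_MAIN
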
diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h
new file mode 100644
index 000000000000..e8eafaf0941a
--- /dev/null
+++ b/tools/testing/selftests/kselftest_module.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __KSELFTEST_MODULE_H
+#define __KSELFTEST_MODULE_H
+
+#include <linux/module.h>
+
+/*
+ * Test framework for writing test modules to be loaded by kselftest.
+ * See Documentation/dev-tools/kselftest.rst for an example test module.
+ */
+
+#define KSTM_MODULE_GLOBALS() \
+static unsigned int total_tests __initdata; \
+static unsigned int failed_tests __initdata
+
+#define KSTM_CHECK_ZERO(x) do { \
+ total_tests++; \
+ if (x) { \
+ pr_warn("TC failed at %s:%d\n", __func__, __LINE__); \
+ failed_tests++; \
+ } \
+} while (0)
+
+static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)
+{
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+
+#define KSTM_MODULE_LOADERS(__module) \
+static int __init __module##_init(void) \
+{ \
+ pr_info("loaded.\n"); \
+ selftest(); \
+ return kstm_report(total_tests, failed_tests); \
+} \
+static void __exit __module##_exit(void) \
+{ \
+ pr_info("unloaded.\n"); \
+} \
+module_init(__module##_init); \
+module_exit(__module##_exit)
+
+#endif /* __KSELFTEST_MODULE_H */
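
A hedged sketch of the kind of in-kernel test module these helpers expect; the module name, pr_fmt prefix, and include path are illustrative (the canonical example is in Documentation/dev-tools/kselftest.rst):

/* Minimal test module built on kselftest_module.h (illustrative). */
#define pr_fmt(fmt) "test_example: " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "../tools/testing/selftests/kselftest_module.h"

KSTM_MODULE_GLOBALS();

static void __init selftest(void)
{
	KSTM_CHECK_ZERO(1 != 1);	/* passes: expression is zero */
	KSTM_CHECK_ZERO(2 + 2 - 4);	/* passes: expression is zero */
}

KSTM_MODULE_LOADERS(test_example);
MODULE_LICENSE("GPL");

KSTM_MODULE_LOADERS() generates the module init/exit functions, calls selftest() at load time, and turns the pass/fail counters into the module's init return value.
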
diff --git a/tools/testing/selftests/kselftest_module.sh b/tools/testing/selftests/kselftest_module.sh
new file mode 100755
index 000000000000..18e1c7992d30
--- /dev/null
+++ b/tools/testing/selftests/kselftest_module.sh
@@ -0,0 +1,84 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+
+#
+# Runs an individual test module.
+#
+# kselftest expects a separate executable for each test; this can be
+# created by adding a script like this:
+#
+# #!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# $(dirname $0)/../kselftest_module.sh "description" module_name
+#
+# Example: tools/testing/selftests/lib/printf.sh
+
+desc="" # Output prefix.
+module="" # Filename (without the .ko).
+args="" # modprobe arguments.
+
+modprobe="/sbin/modprobe"
+
+main() {
+ parse_args "$@"
+ assert_root
+ assert_have_module
+ run_module
+}
+
+parse_args() {
+ script=${0##*/}
+
+ if [ $# -lt 2 ]; then
+ echo "Usage: $script <description> <module_name> [FAIL]"
+ exit 1
+ fi
+
+ desc="$1"
+ shift || true
+ module="$1"
+ shift || true
+ args="$@"
+}
+
+assert_root() {
+ if [ ! -w /dev ]; then
+ skip "please run as root"
+ fi
+}
+
+assert_have_module() {
+ if ! $modprobe -q -n $module; then
+ skip "module $module is not found"
+ fi
+}
+
+run_module() {
+ if $modprobe -q $module $args; then
+ $modprobe -q -r $module
+ say "ok"
+ else
+ fail ""
+ fi
+}
+
+say() {
+ echo "$desc: $1"
+}
+
+
+fail() {
+ say "$1 [FAIL]" >&2
+ exit 1
+}
+
+skip() {
+ say "$1 [SKIP]" >&2
+ # Kselftest framework requirement - SKIP code is 4.
+ exit 4
+}
+
+#
+# Main script
+#
+main "$@"
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 6210ba41c29e..41266af0d3dc 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,8 +1,15 @@
/x86_64/cr4_cpuid_sync_test
/x86_64/evmcs_test
+/x86_64/hyperv_cpuid
+/x86_64/kvm_create_max_vcpus
+/x86_64/mmio_warning_test
/x86_64/platform_info_test
/x86_64/set_sregs_test
+/x86_64/smm_test
+/x86_64/state_test
/x86_64/sync_regs_test
+/x86_64/vmx_close_while_nested_test
+/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
-/x86_64/state_test
+/clear_dirty_log_test
/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index f9a0e9938480..62afd0b43074 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,3 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+include ../../../../scripts/Kbuild.include
+
all:
top_srcdir = ../../../..
@@ -8,19 +11,24 @@ LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebi
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
LIBKVM_aarch64 = lib/aarch64/processor.c
-TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
+TEST_GEN_PROGS_x86_64 += x86_64/kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
+TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += x86_64/state_test
-TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
-TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
-TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
+TEST_GEN_PROGS_x86_64 += dirty_log_test
-TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
+TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
@@ -28,8 +36,14 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
-LDFLAGS += -pthread
+CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
+ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
+ -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
+
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += -pthread $(no-pie-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 4715cfba20dc..fc27f890155b 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -131,6 +131,7 @@ static void *vcpu_worker(void *data)
while (!READ_ONCE(host_quit)) {
/* Let the guest dirty the random pages */
ret = _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
pages_count += TEST_PAGES_PER_LOOP;
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
@@ -288,8 +289,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#endif
max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
guest_page_size = (1ul << guest_page_shift);
- /* 1G of guest page sized pages */
- guest_num_pages = (1ul << (30 - guest_page_shift));
+ /*
+ * A little more than 1G of guest page sized pages. Cover the
+ * case where the size is not aligned to 64 pages.
+ */
+ guest_num_pages = (1ul << (30 - guest_page_shift)) + 16;
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
@@ -311,7 +315,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#ifdef USE_CLEAR_DIRTY_LOG
struct kvm_enable_cap cap = {};
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT;
+ cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
cap.args[0] = 1;
vm_enable_cap(vm, &cap);
#endif
@@ -359,7 +363,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
#ifdef USE_CLEAR_DIRTY_LOG
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- DIV_ROUND_UP(host_num_pages, 64) * 64);
+ host_num_pages);
#endif
vm_dirty_log_verify(bmap);
iteration++;
@@ -423,11 +427,14 @@ int main(int argc, char *argv[])
unsigned long interval = TEST_HOST_LOOP_INTERVAL;
bool mode_selected = false;
uint64_t phys_offset = 0;
- unsigned int mode, host_ipa_limit;
+ unsigned int mode;
int opt, i;
+#ifdef __aarch64__
+ unsigned int host_ipa_limit;
+#endif
#ifdef USE_CLEAR_DIRTY_LOG
- if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT)) {
+ if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2)) {
fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
exit(KSFT_SKIP);
}
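
kvm_vm_clear_dirty_log() used in this test wraps the KVM_CLEAR_DIRTY_LOG ioctl that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 advertises: userspace fetches the dirty bitmap and then explicitly re-protects a page range. A rough sketch of the raw ioctl, assuming vm_fd, slot and bitmap come from the caller and the range was just reported dirty by KVM_GET_DIRTY_LOG:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int clear_dirty_range(int vm_fd, __u32 slot, void *bitmap,
			     __u64 first_page, __u32 num_pages)
{
	struct kvm_clear_dirty_log clear = {
		.slot = slot,
		.num_pages = num_pages,
		.first_page = first_page,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}
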
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a84785b02557..7318fb054ae9 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/kvm_util.h
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
*/
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H
@@ -102,6 +100,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_mp_state *mp_state);
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
@@ -117,6 +116,12 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
+#ifdef __x86_64__
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state);
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state, bool ignore_error);
+#endif
const char *exit_reason_str(unsigned int exit_reason);
@@ -132,6 +137,8 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
+bool vm_is_unrestricted_guest(struct kvm_vm *vm);
+
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index 31e030915c1f..12a9a4b9cead 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/sparsebit.h
*
* Copyright (C) 2018, Google LLC.
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- *
* Header file that describes API to the sparsebit library.
* This library provides a memory efficient means of storing
* the settings of bits indexed via a uint64_t. Memory usage
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index c7dafe8bd02c..a41db6fb7e24 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/test_util.h
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
*/
#ifndef SELFTEST_KVM_TEST_UTIL_H
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index e2884c2b81ff..80d19740d2dc 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/processor.h
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
*/
#ifndef SELFTEST_KVM_PROCESSOR_H
@@ -303,6 +301,8 @@ static inline unsigned long get_xmm(int n)
return 0;
}
+bool is_intel_cpu(void);
+
struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
@@ -778,6 +778,33 @@ void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+#define APIC_BASE_MSR 0x800
+#define X2APIC_ENABLE (1UL << 10)
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_FIXED_MASK 0x00700
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+
#define MSR_IA32_TSCDEADLINE 0x000006e0
#define MSR_IA32_UCODE_WRITE 0x00000079
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index c9bd935b939c..69b17055f63d 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/vmx.h
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
*/
#ifndef SELFTEST_KVM_VMX_H
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index e8c42506a09d..19e667911496 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -7,6 +7,8 @@
#define _GNU_SOURCE /* for program_invocation_name */
+#include <linux/compiler.h>
+
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
@@ -67,15 +69,13 @@ static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
return 1 << (vm->va_bits - shift);
}
-static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
return 1 << (vm->page_shift - 3);
}
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
- int rc;
-
if (!vm->pgd_created) {
vm_paddr_t paddr = vm_phy_pages_alloc(vm,
page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
@@ -181,6 +181,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
unmapped_gva:
TEST_ASSERT(false, "No mapping for vm virtual address, "
"gva: 0x%lx", gva);
+ exit(1);
}
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
@@ -226,7 +227,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
struct kvm_vm *vm;
- vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ vm = vm_create(VM_MODE_P40V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
vm_vcpu_add_default(vm, vcpuid, guest_code);
@@ -312,6 +313,6 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
- fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
+ fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
indent, "", pstate, pc);
}
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 6398efe67885..4911fc77d0f6 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/assert.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for getline(3) and strchrnul(3)*/
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 5eb857584aa3..bc75a91e00a6 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/elf.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/kvm/lib/io.c b/tools/testing/selftests/kvm/lib/io.c
index cff869ffe6ee..eaf351cc7e7f 100644
--- a/tools/testing/selftests/kvm/lib/io.c
+++ b/tools/testing/selftests/kvm/lib/io.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/io.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index b52cfdefecbf..ee864fa07d8e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/kvm_util.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#include "test_util.h"
@@ -91,6 +90,11 @@ static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
+ if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+ fprintf(stderr, "immediate_exit not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
@@ -130,7 +134,6 @@ struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
int perm, unsigned long type)
{
struct kvm_vm *vm;
- int kvm_fd;
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -551,7 +554,6 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
uint32_t flags)
{
int ret;
- unsigned long pmem_size = 0;
struct userspace_mem_region *region;
size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
@@ -1121,6 +1123,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
return rc;
}
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ vcpu->state->immediate_exit = 1;
+ ret = ioctl(vcpu->fd, KVM_RUN, NULL);
+ vcpu->state->immediate_exit = 0;
+
+ TEST_ASSERT(ret == -1 && errno == EINTR,
+ "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
+ ret, errno);
+}
+
/*
* VM VCPU Set MP State
*
@@ -1229,6 +1247,40 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
ret, errno);
}
+#ifdef __x86_64__
+void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
+ TEST_ASSERT(ret == 0,
+ "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
+ ret, errno);
+}
+
+int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_nested_state *state, bool ignore_error)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
+ if (!ignore_error) {
+ TEST_ASSERT(ret == 0,
+ "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
+ ret, errno);
+ }
+
+ return ret;
+}
+#endif
+
/*
* VM VCPU System Regs Get
*
@@ -1281,7 +1333,6 @@ void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
@@ -1531,3 +1582,39 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
+
+/*
+ * Is Unrestricted Guest
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ *
+ * Output Args: None
+ *
+ * Return: True if the kvm_intel unrestricted_guest parameter is 'Y', otherwise false.
+ *
+ * Check if the unrestricted guest flag is enabled.
+ */
+bool vm_is_unrestricted_guest(struct kvm_vm *vm)
+{
+ char val = 'N';
+ size_t count;
+ FILE *f;
+
+ if (vm == NULL) {
+ /* Ensure that the KVM vendor-specific module is loaded. */
+ f = fopen(KVM_DEV_PATH, "r");
+ TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
+ errno);
+ fclose(f);
+ }
+
+ f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
+ if (f) {
+ count = fread(&val, sizeof(char), 1, f);
+ TEST_ASSERT(count == 1, "Unable to read from param file.");
+ fclose(f);
+ }
+
+ return val == 'Y';
+}
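
vcpu_run_complete_io(), the nested-state accessors and vm_is_unrestricted_guest() mainly serve the save/restore style tests further down in this diff. A condensed sketch of that flow, mirroring state_test/evmcs_test (not a drop-in helper; error handling lives in the TEST_ASSERTs inside the called helpers):

#include <fcntl.h>
#include <stdlib.h>

#include "kvm_util.h"
#include "processor.h"

static void migrate_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_x86_state *state;
	struct kvm_regs regs;

	/* vcpu_save_state() now completes any pending KVM_EXIT_IO via
	 * vcpu_run_complete_io() before reading the state out. */
	state = vcpu_save_state(vm, vcpuid);
	vcpu_regs_get(vm, vcpuid, &regs);

	kvm_vm_release(vm);

	/* Re-create the vCPU in a fresh VM and replay the snapshot. */
	kvm_vm_restart(vm, O_RDWR);
	vm_vcpu_add(vm, vcpuid, 0, 0);
	vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
	vcpu_load_state(vm, vcpuid, state);
	free(state);
}
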
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 4595e42c6e29..265b7822f591 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -1,9 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/lib/kvm_util_internal.h
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index b132bc95d183..031ba3c932ed 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Sparse bit array
*
* Copyright (C) 2018, Google LLC.
* Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver)
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* This library provides functions to support a memory efficient bit array,
* with an index size of 2^64. A sparsebit array is allocated through
* the use sparsebit_alloc() and free'd via sparsebit_free(),
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
index a2ab38be2f47..b701a01cfcb6 100644
--- a/tools/testing/selftests/kvm/lib/ucall.c
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -142,7 +142,7 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
vm_vaddr_t gva;
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access");
- gva = *(vm_vaddr_t *)run->mmio.data;
+ memcpy(&gva, run->mmio.data, sizeof(gva));
memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index f28127f4a3af..d2ad85fb01ac 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for program_invocation_name */
@@ -229,8 +228,6 @@ void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
- int rc;
-
TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -549,7 +546,6 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
struct pageDirectoryPointerEntry *pdpe;
struct pageDirectoryEntry *pde;
struct pageTableEntry *pte;
- void *hva;
TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -582,6 +578,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
unmapped_gva:
TEST_ASSERT(false, "No mapping for vm virtual address, "
"gva: 0x%lx", gva);
+ exit(EXIT_FAILURE);
}
static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
@@ -1030,6 +1027,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
nested_size, sizeof(state->nested_));
}
+ /*
+ * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+ * guest state is consistent only after userspace re-enters the
+ * kernel with KVM_RUN. Complete IO prior to migrating state
+ * to a new VM.
+ */
+ vcpu_run_complete_io(vm, vcpuid);
+
nmsrs = kvm_get_num_msrs(vm);
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
list->nmsrs = nmsrs;
@@ -1093,12 +1098,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int r;
- if (state->nested.size) {
- r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
- r);
- }
-
r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
r);
@@ -1130,4 +1129,26 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
r);
+
+ if (state->nested.size) {
+ r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+ r);
+ }
+}
+
+bool is_intel_cpu(void)
+{
+ int eax, ebx, ecx, edx;
+ const uint32_t *chunk;
+ const int leaf = 0;
+
+ __asm__ __volatile__(
+ "cpuid"
+ : /* output */ "=a"(eax), "=b"(ebx),
+ "=c"(ecx), "=d"(edx)
+ : /* input */ "0"(leaf), "2"(0));
+
+ chunk = (const uint32_t *)("GenuineIntel");
+ return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 771ba6bf751c..fe56d159d65f 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#define _GNU_SOURCE /* for program_invocation_name */
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index d503a51fad30..63cc9c3f5ab6 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -87,22 +87,26 @@ int main(int argc, char *argv[])
while (1) {
rc = _vcpu_run(vm, VCPU_ID);
- if (run->exit_reason == KVM_EXIT_IO) {
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_SYNC:
- /* emulate hypervisor clearing CR4.OSXSAVE */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- sregs.cr4 &= ~X86_CR4_OSXSAVE;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
- break;
- case UCALL_ABORT:
- TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
- }
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ /* emulate hypervisor clearing CR4.OSXSAVE */
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ sregs.cr4 &= ~X86_CR4_OSXSAVE;
+ vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ break;
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index c49c2a28b0eb..241919ef1eac 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -19,8 +19,6 @@
#define VCPU_ID 5
-static bool have_nested_state;
-
void l2_guest_code(void)
{
GUEST_SYNC(6);
@@ -73,7 +71,6 @@ void guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- struct vmx_pages *vmx_pages = NULL;
vm_vaddr_t vmx_pages_gva = 0;
struct kvm_regs regs1, regs2;
@@ -88,8 +85,6 @@ int main(int argc, char *argv[])
.args[0] = (unsigned long)&evmcs_ver
};
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
-
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -113,7 +108,7 @@ int main(int argc, char *argv[])
vcpu_regs_get(vm, VCPU_ID, &regs1);
- vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
for (stage = 1;; stage++) {
@@ -123,8 +118,6 @@ int main(int argc, char *argv[])
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -144,12 +137,16 @@ int main(int argc, char *argv[])
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
kvm_vm_release(vm);
/* Restore state in a new VM. */
kvm_vm_restart(vm, O_RDWR);
vm_vcpu_add(vm, VCPU_ID, 0, 0);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
vcpu_load_state(vm, VCPU_ID, state);
run = vcpu_state(vm, VCPU_ID);
free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 264425f75806..f72b3043db0e 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -52,15 +52,11 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
TEST_ASSERT(entry->index == 0,
".index field should be zero");
- TEST_ASSERT(entry->index == 0,
- ".index field should be zero");
-
TEST_ASSERT(entry->flags == 0,
".flags field should be zero");
- TEST_ASSERT(entry->padding[0] == entry->padding[1]
- == entry->padding[2] == 0,
- ".index field should be zero");
+ TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
+ !entry->padding[2], "padding should be zero");
/*
* If needed for debug:
@@ -90,7 +86,6 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
{
int nent = 20; /* should be enough */
static struct kvm_cpuid2 *cpuid;
- int ret;
cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
@@ -141,7 +136,13 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+ rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ if (rv) {
+ fprintf(stderr,
+ "Enlightened VMCS is unsupported, skip related test\n");
+ goto vm_free;
+ }
hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
if (!hv_cpuid_entries)
@@ -151,6 +152,7 @@ int main(int argc, char *argv[])
free(hv_cpuid_entries);
+vm_free:
kvm_vm_free(vm);
return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
new file mode 100644
index 000000000000..6a3eec8da351
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * kvm_create_max_vcpus
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * Test for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_VCPU_ID.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "asm/kvm.h"
+#include "linux/kvm.h"
+
+void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
+{
+ struct kvm_vm *vm;
+ int i;
+
+ printf("Testing creating %d vCPUs, with IDs %d...%d.\n",
+ num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
+
+ vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+ for (i = 0; i < num_vcpus; i++) {
+ int vcpu_id = first_vcpu_id + i;
+
+ /* This asserts that the vCPU was created. */
+ vm_vcpu_add(vm, vcpu_id, 0, 0);
+ }
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
+ int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+
+ printf("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
+ printf("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
+
+ /*
+ * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
+ * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
+ * in this case.
+ */
+ if (!kvm_max_vcpu_id)
+ kvm_max_vcpu_id = kvm_max_vcpus;
+
+ TEST_ASSERT(kvm_max_vcpu_id >= kvm_max_vcpus,
+ "KVM_MAX_VCPU_ID (%d) must be at least as large as KVM_MAX_VCPUS (%d).",
+ kvm_max_vcpu_id, kvm_max_vcpus);
+
+ test_vcpu_creation(0, kvm_max_vcpus);
+
+ if (kvm_max_vcpu_id > kvm_max_vcpus)
+ test_vcpu_creation(
+ kvm_max_vcpu_id - kvm_max_vcpus, kvm_max_vcpus);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
new file mode 100644
index 000000000000..00bb97d76000
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -0,0 +1,126 @@
+/*
+ * mmio_warning_test
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test that we don't get a kernel warning when we call KVM_RUN after a
+ * triple fault occurs. To get the triple fault to occur we call KVM_RUN
+ * on a VCPU that hasn't been properly set up.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <kvm_util.h>
+#include <linux/kvm.h>
+#include <processor.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <test_util.h>
+#include <unistd.h>
+
+#define NTHREAD 4
+#define NPROCESS 5
+
+struct thread_context {
+ int kvmcpu;
+ struct kvm_run *run;
+};
+
+void *thr(void *arg)
+{
+ struct thread_context *tc = (struct thread_context *)arg;
+ int res;
+ int kvmcpu = tc->kvmcpu;
+ struct kvm_run *run = tc->run;
+
+ res = ioctl(kvmcpu, KVM_RUN, 0);
+ printf("ret1=%d exit_reason=%d suberror=%d\n",
+ res, run->exit_reason, run->internal.suberror);
+
+ return 0;
+}
+
+void test(void)
+{
+ int i, kvm, kvmvm, kvmcpu;
+ pthread_t th[NTHREAD];
+ struct kvm_run *run;
+ struct thread_context tc;
+
+ kvm = open("/dev/kvm", O_RDWR);
+ TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
+ kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
+ TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
+ kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
+ TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
+ run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
+ kvmcpu, 0);
+ tc.kvmcpu = kvmcpu;
+ tc.run = run;
+ srand(getpid());
+ for (i = 0; i < NTHREAD; i++) {
+ pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
+ usleep(rand() % 10000);
+ }
+ for (i = 0; i < NTHREAD; i++)
+ pthread_join(th[i], NULL);
+}
+
+int get_warnings_count(void)
+{
+ int warnings;
+ FILE *f;
+
+ f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
+ fscanf(f, "%d", &warnings);
+ pclose(f);
+
+ return warnings;
+}
+
+int main(void)
+{
+ int warnings_before, warnings_after;
+
+ if (!is_intel_cpu()) {
+ printf("Must be run on an Intel CPU, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ if (vm_is_unrestricted_guest(NULL)) {
+ printf("Unrestricted guest must be disabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ warnings_before = get_warnings_count();
+
+ for (int i = 0; i < NPROCESS; ++i) {
+ int status;
+ int pid = fork();
+
+ if (pid < 0)
+ exit(1);
+ if (pid == 0) {
+ test();
+ exit(0);
+ }
+ while (waitpid(pid, &status, __WALL) != pid)
+ ;
+ }
+
+ warnings_after = get_warnings_count();
+ TEST_ASSERT(warnings_before == warnings_after,
+ "Warnings found in kernel. Run 'dmesg' to inspect them.");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index eb3e7a838cb4..40050e44ec0a 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -81,7 +81,6 @@ static void test_msr_platform_info_disabled(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
- struct kvm_run *state;
int rv;
uint64_t msr_platform_info;
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 35640e8e95bc..9f7656184f31 100644
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -1,16 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_SET_SREGS tests
*
* Copyright (C) 2018, Google LLC.
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* This is a regression test for the bug fixed by the following commit:
* d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
*
* That bug allowed a user-mode program that called the KVM_SET_SREGS
* ioctl to put a VCPU's local APIC into an invalid state.
- *
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
new file mode 100644
index 000000000000..4daf520bada1
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for SMM.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID 1
+
+#define PAGE_SIZE 4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+/*
+ * This is compiled as normal 64-bit code; the SMI handler, however, is
+ * executed in real-address mode.  To keep things simple we limit ourselves
+ * to a mode-independent subset of asm here.
+ * The SMI handler always reports back the fixed stage SMRAM_STAGE.
+ */
+uint8_t smi_handler[] = {
+ 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
+ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
+ 0x0f, 0xaa, /* rsm */
+};
+
+void sync_with_host(uint64_t phase)
+{
+ asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
+ : : "a" (phase));
+}
+
+void self_smi(void)
+{
+ wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+ APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+ uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+ sync_with_host(1);
+
+ wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+ sync_with_host(2);
+
+ self_smi();
+
+ sync_with_host(4);
+
+ if (vmx_pages) {
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+
+ sync_with_host(5);
+
+ self_smi();
+
+ sync_with_host(7);
+ }
+
+ sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva = 0;
+
+ struct kvm_regs regs;
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ struct kvm_x86_state *state;
+ int stage, stage_reported;
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+ SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+ TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+ == SMRAM_GPA, "could not allocate guest physical addresses?");
+
+ memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+ memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
+ sizeof(smi_handler));
+
+ vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+ if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ } else {
+ printf("will skip SMM test with VMX enabled\n");
+ vcpu_args_set(vm, VCPU_ID, 1, 0);
+ }
+
+ for (stage = 1;; stage++) {
+ _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Stage %d: unexpected exit reason: %u (%s),\n",
+ stage, run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ memset(&regs, 0, sizeof(regs));
+ vcpu_regs_get(vm, VCPU_ID, &regs);
+
+ stage_reported = regs.rax & 0xff;
+
+ if (stage_reported == DONE)
+ goto done;
+
+ TEST_ASSERT(stage_reported == stage ||
+ stage_reported == SMRAM_STAGE,
+ "Unexpected stage: #%x, got %x",
+ stage, stage_reported);
+
+ state = vcpu_save_state(vm, VCPU_ID);
+ kvm_vm_release(vm);
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID, 0, 0);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_load_state(vm, VCPU_ID, state);
+ run = vcpu_state(vm, VCPU_ID);
+ free(state);
+ }
+
+done:
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4b3f556265f1..1a23617f34d9 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM_GET/SET_* tests
*
* Copyright (C) 2018, Red Hat, Inc.
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Tests for vCPU state save/restore, including nested guest state.
*/
#define _GNU_SOURCE /* for program_invocation_short_name */
@@ -22,8 +21,6 @@
#define VCPU_ID 5
-static bool have_nested_state;
-
void l2_guest_code(void)
{
GUEST_SYNC(6);
@@ -122,7 +119,6 @@ void guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
- struct vmx_pages *vmx_pages = NULL;
vm_vaddr_t vmx_pages_gva = 0;
struct kvm_regs regs1, regs2;
@@ -132,8 +128,6 @@ int main(int argc, char *argv[])
struct ucall uc;
int stage;
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
-
/* Create VM */
vm = vm_create_default(VCPU_ID, 0, guest_code);
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -142,7 +136,7 @@ int main(int argc, char *argv[])
vcpu_regs_get(vm, VCPU_ID, &regs1);
if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
} else {
printf("will skip nested state checks\n");
@@ -156,8 +150,6 @@ int main(int argc, char *argv[])
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_ABORT:
TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -177,6 +169,9 @@ int main(int argc, char *argv[])
stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
kvm_vm_release(vm);
/* Restore state in a new VM. */
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index c8478ce9ea77..11c2a70a7b87 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for x86 KVM_CAP_SYNC_REGS
*
* Copyright (C) 2018, Google LLC.
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
* Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
* including requesting an invalid register set, updates to/from values
* in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
@@ -25,9 +24,15 @@
void guest_code(void)
{
+ /*
+ * Use a callee-saved register; otherwise the compiler
+ * saves and restores it around the call to GUEST_SYNC.
+ */
+ register u32 stage asm("rbx");
for (;;) {
GUEST_SYNC(0);
- asm volatile ("inc %r11");
+ stage++;
+ asm volatile ("" : : "r" (stage));
}
}
@@ -147,7 +152,7 @@ int main(int argc, char *argv[])
compare_vcpu_events(&events, &run->s.regs.events);
/* Set and verify various register values. */
- run->s.regs.regs.r11 = 0xBAD1DEA;
+ run->s.regs.regs.rbx = 0xBAD1DEA;
run->s.regs.sregs.apic_base = 1 << 11;
/* TODO run->s.regs.events.XYZ = ABC; */
@@ -158,9 +163,9 @@ int main(int argc, char *argv[])
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.r11 == 0xBAD1DEA + 1,
- "r11 sync regs value incorrect 0x%llx.",
- run->s.regs.regs.r11);
+ TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
+ "rbx sync regs value incorrect 0x%llx.",
+ run->s.regs.regs.rbx);
TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
"apic_base sync regs value incorrect 0x%llx.",
run->s.regs.sregs.apic_base);
@@ -179,15 +184,15 @@ int main(int argc, char *argv[])
*/
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
- run->s.regs.regs.r11 = 0xDEADBEEF;
+ run->s.regs.regs.rbx = 0xDEADBEEF;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.r11 != 0xDEADBEEF,
- "r11 sync regs value incorrect 0x%llx.",
- run->s.regs.regs.r11);
+ TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
+ "rbx sync regs value incorrect 0x%llx.",
+ run->s.regs.regs.rbx);
/* Clear kvm_valid_regs bits and kvm_dirty_bits.
* Verify s.regs values are not overwritten with existing guest values
@@ -195,21 +200,21 @@ int main(int argc, char *argv[])
*/
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = 0;
- run->s.regs.regs.r11 = 0xAAAA;
- regs.r11 = 0xBAC0;
+ run->s.regs.regs.rbx = 0xAAAA;
+ regs.rbx = 0xBAC0;
vcpu_regs_set(vm, VCPU_ID, &regs);
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.r11 == 0xAAAA,
- "r11 sync regs value incorrect 0x%llx.",
- run->s.regs.regs.r11);
+ TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
+ "rbx sync regs value incorrect 0x%llx.",
+ run->s.regs.regs.rbx);
vcpu_regs_get(vm, VCPU_ID, &regs);
- TEST_ASSERT(regs.r11 == 0xBAC0 + 1,
- "r11 guest value incorrect 0x%llx.",
- regs.r11);
+ TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
+ "rbx guest value incorrect 0x%llx.",
+ regs.rbx);
/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
* with existing guest values but that guest values are overwritten
@@ -217,19 +222,19 @@ int main(int argc, char *argv[])
*/
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
- run->s.regs.regs.r11 = 0xBBBB;
+ run->s.regs.regs.rbx = 0xBBBB;
rv = _vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.r11 == 0xBBBB,
- "r11 sync regs value incorrect 0x%llx.",
- run->s.regs.regs.r11);
+ TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
+ "rbx sync regs value incorrect 0x%llx.",
+ run->s.regs.regs.rbx);
vcpu_regs_get(vm, VCPU_ID, &regs);
- TEST_ASSERT(regs.r11 == 0xBBBB + 1,
- "r11 guest value incorrect 0x%llx.",
- regs.r11);
+ TEST_ASSERT(regs.rbx == 0xBBBB + 1,
+ "rbx guest value incorrect 0x%llx.",
+ regs.rbx);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
new file mode 100644
index 000000000000..3b0ffe01dacd
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vmx_close_while_nested
+ *
+ * Copyright (C) 2019, Red Hat, Inc.
+ *
+ * Verify that nothing bad happens if a KVM user exits with open
+ * file descriptors while executing a nested guest.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID 5
+
+enum {
+ PORT_L0_EXIT = 0x2000,
+};
+
+/* The virtual machine object. */
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+ /* Exit to L0 */
+ asm volatile("inb %%dx, %%al"
+ : : [port] "d" (PORT_L0_EXIT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ /* Prepare the VMCS for L2 execution. */
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(0);
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva;
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /* Allocate VMX pages and shared descriptors (vmx_pages). */
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ if (run->io.port == PORT_L0_EXIT)
+ break;
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
+ /* NOT REACHED */
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ }
+ }
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
new file mode 100644
index 000000000000..ed7218d166da
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vmx_set_nested_state_test
+ *
+ * Copyright (C) 2019, Google LLC.
+ *
+ * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <errno.h>
+#include <linux/kvm.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+/*
+ * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
+ * changes, this should be updated.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+#define VCPU_ID 5
+
+void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
+{
+ volatile struct kvm_run *run;
+
+ vcpu_nested_state_set(vm, VCPU_ID, state, false);
+ run = vcpu_state(vm, VCPU_ID);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+void test_nested_state_expect_errno(struct kvm_vm *vm,
+ struct kvm_nested_state *state,
+ int expected_errno)
+{
+ volatile struct kvm_run *run;
+ int rv;
+
+ rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
+ TEST_ASSERT(rv == -1 && errno == expected_errno,
+ "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
+ strerror(expected_errno), expected_errno, rv, strerror(errno),
+ errno);
+ run = vcpu_state(vm, VCPU_ID);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+void test_nested_state_expect_einval(struct kvm_vm *vm,
+ struct kvm_nested_state *state)
+{
+ test_nested_state_expect_errno(vm, state, EINVAL);
+}
+
+void test_nested_state_expect_efault(struct kvm_vm *vm,
+ struct kvm_nested_state *state)
+{
+ test_nested_state_expect_errno(vm, state, EFAULT);
+}
+
+void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
+ u32 vmcs12_revision)
+{
+ /* Set revision_id in vmcs12 to vmcs12_revision. */
+ memcpy(&state->data, &vmcs12_revision, sizeof(u32));
+}
+
+void set_default_state(struct kvm_nested_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->flags = KVM_STATE_NESTED_RUN_PENDING |
+ KVM_STATE_NESTED_GUEST_MODE;
+ state->format = 0;
+ state->size = sizeof(*state);
+}
+
+void set_default_vmx_state(struct kvm_nested_state *state, int size)
+{
+ memset(state, 0, size);
+ state->flags = KVM_STATE_NESTED_GUEST_MODE |
+ KVM_STATE_NESTED_RUN_PENDING |
+ KVM_STATE_NESTED_EVMCS;
+ state->format = 0;
+ state->size = size;
+ state->hdr.vmx.vmxon_pa = 0x1000;
+ state->hdr.vmx.vmcs12_pa = 0x2000;
+ state->hdr.vmx.smm.flags = 0;
+ set_revision_id_for_vmcs12(state, VMCS12_REVISION);
+}
+
+void test_vmx_nested_state(struct kvm_vm *vm)
+{
+ /* Add a page for VMCS12. */
+ const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
+ struct kvm_nested_state *state =
+ (struct kvm_nested_state *)malloc(state_sz);
+
+ /* The format must be set to 0. 0 for VMX, 1 for SVM. */
+ set_default_vmx_state(state, state_sz);
+ state->format = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * We cannot virtualize anything if the guest does not have VMX
+ * enabled.
+ */
+ set_default_vmx_state(state, state_sz);
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * We cannot virtualize anything if the guest does not have VMX
+ * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
+ * is set to -1ull, but the flags must be zero.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = -1ull;
+ test_nested_state_expect_einval(vm, state);
+
+ state->hdr.vmx.vmcs12_pa = -1ull;
+ state->flags = KVM_STATE_NESTED_EVMCS;
+ test_nested_state_expect_einval(vm, state);
+
+ state->flags = 0;
+ test_nested_state(vm, state);
+
+ /* Enable VMX in the guest CPUID. */
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ /*
+ * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
+ * setting the nested state, but flags other than eVMCS must be clear.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = -1ull;
+ state->hdr.vmx.vmcs12_pa = -1ull;
+ test_nested_state_expect_einval(vm, state);
+
+ state->flags = KVM_STATE_NESTED_EVMCS;
+ test_nested_state(vm, state);
+
+ /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+ state->hdr.vmx.smm.flags = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = -1ull;
+ state->flags = 0;
+ test_nested_state_expect_einval(vm, state);
+
+ /* It is invalid to have vmxon_pa set to a non-page aligned address. */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = 1;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
+ * KVM_STATE_NESTED_GUEST_MODE set together.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->flags = KVM_STATE_NESTED_GUEST_MODE |
+ KVM_STATE_NESTED_RUN_PENDING;
+ state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * It is invalid to have any of the SMM flags set besides:
+ * KVM_STATE_NESTED_SMM_GUEST_MODE
+ * KVM_STATE_NESTED_SMM_VMXON
+ */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
+ KVM_STATE_NESTED_SMM_VMXON);
+ test_nested_state_expect_einval(vm, state);
+
+ /* Outside SMM, SMM flags must be zero. */
+ set_default_vmx_state(state, state_sz);
+ state->flags = 0;
+ state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
+ test_nested_state_expect_einval(vm, state);
+
+ /* Size must be large enough to fit kvm_nested_state and vmcs12. */
+ set_default_vmx_state(state, state_sz);
+ state->size = sizeof(*state);
+ test_nested_state(vm, state);
+
+ /* vmxon_pa cannot be the same address as vmcs_pa. */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = 0;
+ state->hdr.vmx.vmcs12_pa = 0;
+ test_nested_state_expect_einval(vm, state);
+
+ /* The revision id for vmcs12 must be VMCS12_REVISION. */
+ set_default_vmx_state(state, state_sz);
+ set_revision_id_for_vmcs12(state, 0);
+ test_nested_state_expect_einval(vm, state);
+
+ /*
+ * Test that, if we leave nested mode, the state reflects that when
+ * we read it back.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = -1ull;
+ state->hdr.vmx.vmcs12_pa = -1ull;
+ state->flags = 0;
+ test_nested_state(vm, state);
+ vcpu_nested_state_get(vm, VCPU_ID, state);
+ TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
+ "Size must be between %d and %d. The size returned was %d.",
+ sizeof(*state), state_sz, state->size);
+ TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
+ TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
+
+ free(state);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_nested_state state;
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ /*
+ * AMD currently does not implement set_nested_state, so for now we
+ * just early out.
+ */
+ if (!(entry->ecx & CPUID_VMX)) {
+ fprintf(stderr, "nested VMX not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, 0);
+
+ /* Passing a NULL kvm_nested_state causes an EFAULT. */
+ test_nested_state_expect_efault(vm, NULL);
+
+ /* 'size' cannot be smaller than sizeof(kvm_nested_state). */
+ set_default_state(&state);
+ state.size = 0;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * Setting the flags 0xf fails the flags check. The only flags that
+ * can be used are:
+ * KVM_STATE_NESTED_GUEST_MODE
+ * KVM_STATE_NESTED_RUN_PENDING
+ * KVM_STATE_NESTED_EVMCS
+ */
+ set_default_state(&state);
+ state.flags = 0xf;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * If KVM_STATE_NESTED_RUN_PENDING is set then
+ * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
+ */
+ set_default_state(&state);
+ state.flags = KVM_STATE_NESTED_RUN_PENDING;
+ test_nested_state_expect_einval(vm, &state);
+
+ /*
+ * TODO: When SVM support is added for KVM_SET_NESTED_STATE
+ * add tests here to support it like VMX.
+ */
+ if (entry->ecx & CPUID_VMX)
+ test_vmx_nested_state(vm);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 18fa64db0d7a..f36c10eba71e 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* vmx_tsc_adjust_test
*
* Copyright (C) 2018, Google LLC.
*
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- *
* IA32_TSC_ADJUST test
*
* According to the SDM, "if an execution of WRMSR to the
@@ -121,7 +119,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-void report(int64_t val)
+static void report(int64_t val)
{
printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
@@ -129,7 +127,6 @@ void report(int64_t val)
int main(int argc, char *argv[])
{
- struct vmx_pages *vmx_pages;
vm_vaddr_t vmx_pages_gva;
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
@@ -142,7 +139,7 @@ int main(int argc, char *argv[])
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
/* Allocate VMX pages and shared descriptors (vmx_pages). */
- vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
for (;;) {
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 8b0f16409ed7..077337195783 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -3,8 +3,12 @@
CC := $(CROSS_COMPILE)gcc
ifeq (0,$(MAKELEVEL))
-OUTPUT := $(shell pwd)
+ ifeq ($(OUTPUT),)
+ OUTPUT := $(shell pwd)
+ DEFAULT_INSTALL_HDR_PATH := 1
+ endif
endif
+selfdir = $(realpath $(dir $(filter %/lib.mk,$(MAKEFILE_LIST))))
# The following are built by lib.mk common compile rules.
# TEST_CUSTOM_PROGS should be used by tests that require
@@ -21,9 +25,34 @@ top_srcdir ?= ../../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
+# set default goal to all, so make without a target runs all, even when
+# all isn't the first target in the file.
+.DEFAULT_GOAL := all
+
+# Invoke headers_install with --no-builtin-rules to avoid a circular
+# dependency in the "make kselftest" case. In that case, the second-level
+# make inherits the builtin rules, which try to generate Makefile.o and
+# run into
+# "Circular Makefile.o <- prepare dependency dropped."
+# so headers_install fails and the test compile fails.
+# O=/KBUILD_OUTPUT cases don't run into this error, since the main Makefile
+# invokes them as sub-makes; --no-builtin-rules is not necessary there,
+# but it doesn't cause any failures either. Keep it simple and use the same
+# flags in both cases.
+# Note that the support to install headers from lib.mk is necessary
+# when test Makefile is run directly with "make -C".
+# When local build is done, headers are installed in the default
+# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
khdr:
- make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+ifndef KSFT_KHDR_INSTALL_DONE
+ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
+ make --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
+else
+ make --no-builtin-rules INSTALL_HDR_PATH=$$OUTPUT/usr \
+ ARCH=$(ARCH) -C $(top_srcdir) headers_install
+endif
+endif
all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
else
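As a usage sketch of what the khdr hook enables (the path below is illustrative, not taken from this patch): a single selftest directory can now be built straight from the source tree with "make -C", and lib.mk installs the UAPI headers itself; for a local build they land in the default INSTALL_HDR_PATH (usr/include), while O=/KBUILD_OUTPUT builds install into $OUTPUT/usr.

    # Illustrative direct invocation of one selftest directory; lib.mk's
    # khdr target runs headers_install before the test binaries are built.
    make -C tools/testing/selftests/kvm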
@@ -31,44 +60,13 @@ all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
endif
.ONESHELL:
-define RUN_TEST_PRINT_RESULT
- TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
- echo $$TEST_HDR_MSG; \
- echo "========================================"; \
- if [ ! -x $$TEST ]; then \
- echo "$$TEST_HDR_MSG: Warning: file $$BASENAME_TEST is not executable, correct this.";\
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- else \
- cd `dirname $$TEST` > /dev/null; \
- if [ "X$(summary)" != "X" ]; then \
- (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && \
- echo "ok 1..$$test_num $$TEST_HDR_MSG [PASS]") || \
- (if [ $$? -eq $$skip ]; then \
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]"; \
- else echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- fi;) \
- else \
- (./$$BASENAME_TEST && \
- echo "ok 1..$$test_num $$TEST_HDR_MSG [PASS]") || \
- (if [ $$? -eq $$skip ]; then \
- echo "not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]"; \
- else echo "not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]"; \
- fi;) \
- fi; \
- cd - > /dev/null; \
- fi;
-endef
-
define RUN_TESTS
- @export KSFT_TAP_LEVEL=`echo 1`; \
- test_num=`echo 0`; \
- skip=`echo 4`; \
- echo "TAP version 13"; \
- for TEST in $(1); do \
- BASENAME_TEST=`basename $$TEST`; \
- test_num=`echo $$test_num+1 | bc`; \
- $(call RUN_TEST_PRINT_RESULT,$(TEST),$(BASENAME_TEST),$(test_num),$(skip)) \
- done;
+ @BASE_DIR="$(selfdir)"; \
+ . $(selfdir)/kselftest/runner.sh; \
+ if [ "X$(summary)" != "X" ]; then \
+ per_test_logging=1; \
+ fi; \
+ run_many $(1)
endef
run_tests: all
@@ -105,24 +103,12 @@ else
$(error Error: set INSTALL_PATH to use install)
endif
-define EMIT_TESTS
- @test_num=`echo 0`; \
+emit_tests:
for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
BASENAME_TEST=`basename $$TEST`; \
- test_num=`echo $$test_num+1 | bc`; \
- TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
- echo "echo $$TEST_HDR_MSG"; \
- if [ ! -x $$TEST ]; then \
- echo "echo \"$$TEST_HDR_MSG: Warning: file $$BASENAME_TEST is not executable, correct this.\""; \
- echo "echo \"not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]\""; \
- else
- echo "(./$$BASENAME_TEST >> \$$OUTPUT 2>&1 && echo \"ok 1..$$test_num $$TEST_HDR_MSG [PASS]\") || (if [ \$$? -eq \$$skip ]; then echo \"not ok 1..$$test_num $$TEST_HDR_MSG [SKIP]\"; else echo \"not ok 1..$$test_num $$TEST_HDR_MSG [FAIL]\"; fi;)"; \
- fi; \
- done;
-endef
-
-emit_tests:
- $(EMIT_TESTS)
+ echo " \\"; \
+ echo -n " \"$$BASENAME_TEST\""; \
+ done; \
# define if isn't already. It is undefined in make O= case.
ifeq ($(RM),)
diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile
index 70d5711e3ac8..a105f094676e 100644
--- a/tools/testing/selftests/lib/Makefile
+++ b/tools/testing/selftests/lib/Makefile
@@ -1,8 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for lib/ function selftests
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh
+TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh strscpy.sh
include ../lib.mk
diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh
index 5a90006d1aea..5511dddc5c2d 100755
--- a/tools/testing/selftests/lib/bitmap.sh
+++ b/tools/testing/selftests/lib/bitmap.sh
@@ -1,19 +1,3 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-# Runs bitmap infrastructure tests using test_bitmap kernel module
-if ! /sbin/modprobe -q -n test_bitmap; then
- echo "bitmap: module test_bitmap is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q test_bitmap; then
- /sbin/modprobe -q -r test_bitmap
- echo "bitmap: ok"
-else
- echo "bitmap: [FAIL]"
- exit 1
-fi
+$(dirname $0)/../kselftest_module.sh "bitmap" test_bitmap
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index 126933bcc950..14a77ea4a8da 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,3 +1,4 @@
CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_PRIME_NUMBERS=m
+CONFIG_TEST_STRSCPY=m
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
index 78e7483c8d60..43b28f24e453 100755
--- a/tools/testing/selftests/lib/prime_numbers.sh
+++ b/tools/testing/selftests/lib/prime_numbers.sh
@@ -1,19 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Checks fast/slow prime_number generation for inconsistencies
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-if ! /sbin/modprobe -q -n prime_numbers; then
- echo "prime_numbers: module prime_numbers is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q prime_numbers selftest=65536; then
- /sbin/modprobe -q -r prime_numbers
- echo "prime_numbers: ok"
-else
- echo "prime_numbers: [FAIL]"
- exit 1
-fi
+$(dirname $0)/../kselftest_module.sh "prime numbers" prime_numbers selftest=65536
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
index 45a23e2d64ad..2ffa61da0296 100755
--- a/tools/testing/selftests/lib/printf.sh
+++ b/tools/testing/selftests/lib/printf.sh
@@ -1,19 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-# Runs printf infrastructure using test_printf kernel module
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-if ! /sbin/modprobe -q -n test_printf; then
- echo "printf: module test_printf is not found [SKIP]"
- exit $ksft_skip
-fi
-
-if /sbin/modprobe -q test_printf; then
- /sbin/modprobe -q -r test_printf
- echo "printf: ok"
-else
- echo "printf: [FAIL]"
- exit 1
-fi
+# Tests the printf infrastructure using test_printf kernel module.
+$(dirname $0)/../kselftest_module.sh "printf" test_printf
diff --git a/tools/testing/selftests/lib/strscpy.sh b/tools/testing/selftests/lib/strscpy.sh
new file mode 100755
index 000000000000..71f2be6afba6
--- /dev/null
+++ b/tools/testing/selftests/lib/strscpy.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+$(dirname $0)/../kselftest_module.sh "strscpy*" test_strscpy
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
new file mode 100644
index 000000000000..fd405402c3ff
--- /dev/null
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_PROGS_EXTENDED := functions.sh
+TEST_PROGS := \
+ test-livepatch.sh \
+ test-callbacks.sh \
+ test-shadow-vars.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/livepatch/README b/tools/testing/selftests/livepatch/README
new file mode 100644
index 000000000000..b73cd0e2dd51
--- /dev/null
+++ b/tools/testing/selftests/livepatch/README
@@ -0,0 +1,43 @@
+====================
+Livepatch Self Tests
+====================
+
+This is a small set of sanity tests for kernel livepatching.
+
+The test suite loads and unloads several test kernel modules to verify
+livepatch behavior. Debug information is logged to the kernel's message
+buffer and parsed for expected messages. (Note: the tests will clear
+the message buffer between individual tests.)
+
+
+Config
+------
+
+Set these config options and their prerequisites:
+
+CONFIG_LIVEPATCH=y
+CONFIG_TEST_LIVEPATCH=m
+
+
+Running the tests
+-----------------
+
+Test kernel modules are built as part of lib/ (make modules) and need to
+be installed (make modules_install) as the test scripts will modprobe
+them.
+
+To run the livepatch selftests, from the top of the kernel source tree:
+
+ % make -C tools/testing/selftests TARGETS=livepatch run_tests
+
+
+Adding tests
+------------
+
+See the common functions.sh file for the existing collection of utility
+functions, most importantly set_dynamic_debug() and check_result(). The
+latter function greps the kernel's ring buffer for "livepatch:" and
+"test_klp" strings, so tests be sure to include one of those strings for
+result comparison. Other utility functions include general module
+loading and livepatch loading helpers (waiting for patch transitions,
+sysfs entries, etc.)
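A minimal new test script built on these helpers might look like the sketch below; the module name and the expected dmesg text are illustrative placeholders rather than part of the suite.

    #!/bin/bash
    # SPDX-License-Identifier: GPL-2.0
    . $(dirname $0)/functions.sh

    MOD_LIVEPATCH=test_klp_livepatch   # illustrative module name

    set_dynamic_debug

    echo -n "TEST: basic enable/disable cycle ... "
    dmesg -C

    load_lp $MOD_LIVEPATCH
    disable_lp $MOD_LIVEPATCH
    unload_lp $MOD_LIVEPATCH

    # check_result() compares dmesg against the exact "livepatch:" /
    # "test_klp" messages the modules emit; fill in the real expected
    # text here (the body below is only a placeholder).
    check_result "% modprobe $MOD_LIVEPATCH
    ...
    % rmmod $MOD_LIVEPATCH"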
diff --git a/tools/testing/selftests/livepatch/config b/tools/testing/selftests/livepatch/config
new file mode 100644
index 000000000000..0dd7700464a8
--- /dev/null
+++ b/tools/testing/selftests/livepatch/config
@@ -0,0 +1 @@
+CONFIG_TEST_LIVEPATCH=m
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
new file mode 100644
index 000000000000..30195449c63c
--- /dev/null
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -0,0 +1,198 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+# Shell functions for the rest of the scripts.
+
+MAX_RETRIES=600
+RETRY_INTERVAL=".1" # seconds
+
+# log(msg) - write message to kernel log
+# msg - insightful words
+function log() {
+ echo "$1" > /dev/kmsg
+}
+
+# die(msg) - game over, man
+# msg - dying words
+function die() {
+ log "ERROR: $1"
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+# set_dynamic_debug() - setup kernel dynamic debug
+# TODO - push and pop this config?
+function set_dynamic_debug() {
+ cat << EOF > /sys/kernel/debug/dynamic_debug/control
+file kernel/livepatch/* +p
+func klp_try_switch_task -p
+EOF
+}
+
+# loop_until(cmd) - loop a command until it is successful or $MAX_RETRIES,
+# sleep $RETRY_INTERVAL between attempts
+# cmd - command and its arguments to run
+function loop_until() {
+ local cmd="$*"
+ local i=0
+ while true; do
+ eval "$cmd" && return 0
+ [[ $((i++)) -eq $MAX_RETRIES ]] && return 1
+ sleep $RETRY_INTERVAL
+ done
+}
+
+function is_livepatch_mod() {
+ local mod="$1"
+
+ if [[ $(modinfo "$mod" | awk '/^livepatch:/{print $NF}') == "Y" ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
+function __load_mod() {
+ local mod="$1"; shift
+
+ local msg="% modprobe $mod $*"
+ log "${msg%% }"
+ ret=$(modprobe "$mod" "$@" 2>&1)
+ if [[ "$ret" != "" ]]; then
+ die "$ret"
+ fi
+
+ # Wait for module in sysfs ...
+ loop_until '[[ -e "/sys/module/$mod" ]]' ||
+ die "failed to load module $mod"
+}
+
+
+# load_mod(modname, params) - load a kernel module
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_mod() {
+ local mod="$1"; shift
+
+ is_livepatch_mod "$mod" &&
+ die "use load_lp() to load the livepatch module $mod"
+
+ __load_mod "$mod" "$@"
+}
+
+# load_lp_nowait(modname, params) - load a kernel module with a livepatch
+# but do not wait until the transition finishes
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_lp_nowait() {
+ local mod="$1"; shift
+
+ is_livepatch_mod "$mod" ||
+ die "module $mod is not a livepatch"
+
+ __load_mod "$mod" "$@"
+
+ # Wait for livepatch in sysfs ...
+ loop_until '[[ -e "/sys/kernel/livepatch/$mod" ]]' ||
+ die "failed to load module $mod (sysfs)"
+}
+
+# load_lp(modname, params) - load a kernel module with a livepatch
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_lp() {
+ local mod="$1"; shift
+
+ load_lp_nowait "$mod" "$@"
+
+ # Wait until the transition finishes ...
+ loop_until 'grep -q '^0$' /sys/kernel/livepatch/$mod/transition' ||
+ die "failed to complete transition"
+}
+
+# load_failing_mod(modname, params) - load a kernel module, expect to fail
+# modname - module name to load
+# params - module parameters to pass to modprobe
+function load_failing_mod() {
+ local mod="$1"; shift
+
+ local msg="% modprobe $mod $*"
+ log "${msg%% }"
+ ret=$(modprobe "$mod" "$@" 2>&1)
+ if [[ "$ret" == "" ]]; then
+ die "$mod unexpectedly loaded"
+ fi
+ log "$ret"
+}
+
+# unload_mod(modname) - unload a kernel module
+# modname - module name to unload
+function unload_mod() {
+ local mod="$1"
+
+ # Wait for module reference count to clear ...
+ loop_until '[[ $(cat "/sys/module/$mod/refcnt") == "0" ]]' ||
+ die "failed to unload module $mod (refcnt)"
+
+ log "% rmmod $mod"
+ ret=$(rmmod "$mod" 2>&1)
+ if [[ "$ret" != "" ]]; then
+ die "$ret"
+ fi
+
+ # Wait for module in sysfs ...
+ loop_until '[[ ! -e "/sys/module/$mod" ]]' ||
+ die "failed to unload module $mod (/sys/module)"
+}
+
+# unload_lp(modname) - unload a kernel module with a livepatch
+# modname - module name to unload
+function unload_lp() {
+ unload_mod "$1"
+}
+
+# disable_lp(modname) - disable a livepatch
+# modname - module name to unload
+function disable_lp() {
+ local mod="$1"
+
+ log "% echo 0 > /sys/kernel/livepatch/$mod/enabled"
+ echo 0 > /sys/kernel/livepatch/"$mod"/enabled
+
+ # Wait until the transition finishes and the livepatch gets
+ # removed from sysfs...
+ loop_until '[[ ! -e "/sys/kernel/livepatch/$mod" ]]' ||
+ die "failed to disable livepatch $mod"
+}
+
+# set_pre_patch_ret(modname, pre_patch_ret)
+# modname - module name to set
+# pre_patch_ret - new pre_patch_ret value
+function set_pre_patch_ret {
+ local mod="$1"; shift
+ local ret="$1"
+
+ log "% echo $ret > /sys/module/$mod/parameters/pre_patch_ret"
+ echo "$ret" > /sys/module/"$mod"/parameters/pre_patch_ret
+
+ # Wait for sysfs value to hold ...
+ loop_until '[[ $(cat "/sys/module/$mod/parameters/pre_patch_ret") == "$ret" ]]' ||
+ die "failed to set pre_patch_ret parameter for $mod module"
+}
+
+# check_result() - verify dmesg output
+# TODO - better filter, out of order msgs, etc?
+function check_result {
+ local expect="$*"
+ local result
+
+ result=$(dmesg | grep -v 'tainting' | grep -e 'livepatch:' -e 'test_klp' | sed 's/^\[[ 0-9.]*\] //')
+
+ if [[ "$expect" == "$result" ]] ; then
+ echo "ok"
+ else
+ echo -e "not ok\n\n$(diff -upr --label expected --label result <(echo "$expect") <(echo "$result"))\n"
+ die "livepatch kselftest(s) failed"
+ fi
+}
diff --git a/tools/testing/selftests/livepatch/test-callbacks.sh b/tools/testing/selftests/livepatch/test-callbacks.sh
new file mode 100755
index 000000000000..e97a9dcb73c7
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-callbacks.sh
@@ -0,0 +1,587 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_callbacks_demo
+MOD_LIVEPATCH2=test_klp_callbacks_demo2
+MOD_TARGET=test_klp_callbacks_mod
+MOD_TARGET_BUSY=test_klp_callbacks_busy
+
+set_dynamic_debug
+
+
+# TEST: target module before livepatch
+#
+# Test a combination of loading a kernel module and a livepatch that
+# patches a function in the first module. Load the target module
+# before the livepatch module. Unload them in the same order.
+#
+# - On livepatch enable, before the livepatch transition starts,
+# pre-patch callbacks are executed for vmlinux and $MOD_TARGET (those
+# klp_objects currently loaded). After klp_objects are patched
+# according to the klp_patch, their post-patch callbacks run and the
+# transition completes.
+#
+# - Similarly, on livepatch disable, pre-patch callbacks run before the
+# unpatching transition starts. klp_objects are reverted, post-patch
+# callbacks execute and the transition completes.
+
+echo -n "TEST: target module before livepatch ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_coming notifier
+#
+# This test is similar to the previous test, but (un)load the livepatch
+# module before the target kernel module. This tests the livepatch
+# core's module_coming handler.
+#
+# - On livepatch enable, only pre/post-patch callbacks are executed for
+# currently loaded klp_objects, in this case, vmlinux.
+#
+# - When a targeted module is subsequently loaded, only its
+# pre/post-patch callbacks are executed.
+#
+# - On livepatch disable, all currently loaded klp_objects' (vmlinux and
+# $MOD_TARGET) pre/post-unpatch callbacks are executed.
+
+echo -n "TEST: module_coming notifier ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_going notifier
+#
+# Test loading the livepatch after a targeted kernel module, then unload
+# the kernel module before disabling the livepatch. This tests the
+# livepatch core's module_going handler.
+#
+# - First load a target module, then the livepatch.
+#
+# - When a target module is unloaded, the livepatch is only reverted
+# from that klp_object ($MOD_TARGET). As such, only its pre and
+# post-unpatch callbacks are executed when this occurs.
+#
+# - When the livepatch is disabled, pre and post-unpatch callbacks are
+# run for the remaining klp_object, vmlinux.
+
+echo -n "TEST: module_going notifier ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: module_coming and module_going notifiers
+#
+# This test is similar to the previous test, however the livepatch is
+# loaded first. This tests the livepatch core's module_coming and
+# module_going handlers.
+#
+# - First load the livepatch.
+#
+# - When a targeted kernel module is subsequently loaded, only its
+# pre/post-patch callbacks are executed.
+#
+# - When the target module is unloaded, the livepatch is only reverted
+# from the $MOD_TARGET klp_object. As such, only pre and
+# post-unpatch callbacks are executed when this occurs.
+
+echo -n "TEST: module_coming and module_going notifiers ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: target module not present
+#
+# A simple test of loading a livepatch without one of its patch target
+# klp_objects ever loaded ($MOD_TARGET).
+#
+# - Load the livepatch.
+#
+# - As expected, only pre/post-(un)patch handlers are executed for
+# vmlinux.
+
+echo -n "TEST: target module not present ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: pre-patch callback -ENODEV
+#
+# Test a scenario where a vmlinux pre-patch callback returns a non-zero
+# status (ie, failure).
+#
+# - First load a target module.
+#
+# - Load the livepatch module, setting its 'pre_patch_ret' value to -19
+# (-ENODEV). When its vmlinux pre-patch callback executes, this
+# status code will propagate back to the module-loading subsystem.
+# The result is that the insmod command refuses to load the livepatch
+# module.
+
+echo -n "TEST: pre-patch callback -ENODEV ... "
+dmesg -C
+
+load_mod $MOD_TARGET
+load_failing_mod $MOD_LIVEPATCH pre_patch_ret=-19
+unload_mod $MOD_TARGET
+
+check_result "% modprobe $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_init
+% modprobe $MOD_LIVEPATCH pre_patch_ret=-19
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+test_klp_callbacks_demo: pre_patch_callback: vmlinux
+livepatch: pre-patch callback failed for object 'vmlinux'
+livepatch: failed to enable patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': canceling patching transition, going to unpatch
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+modprobe: ERROR: could not insert '$MOD_LIVEPATCH': No such device
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit"
+
+
+# TEST: module_coming + pre-patch callback -ENODEV
+#
+# Similar to the previous test, setup a livepatch such that its vmlinux
+# pre-patch callback returns success. However, when a targeted kernel
+# module is later loaded, have the livepatch return a failing status
+# code.
+#
+# - Load the livepatch, vmlinux pre-patch callback succeeds.
+#
+# - Set a trap so subsequent pre-patch callbacks to this livepatch will
+# return -ENODEV.
+#
+# - The livepatch pre-patch callback for subsequently loaded target
+# modules will return failure, so the module loader refuses to load
+# the kernel module. No post-patch or pre/post-unpatch callbacks are
+# executed for this klp_object.
+#
+# - Pre/post-unpatch callbacks are run for the vmlinux klp_object.
+
+echo -n "TEST: module_coming + pre-patch callback -ENODEV ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+set_pre_patch_ret $MOD_LIVEPATCH -19
+load_failing_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo -19 > /sys/module/$MOD_LIVEPATCH/parameters/pre_patch_ret
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+livepatch: pre-patch callback failed for object '$MOD_TARGET'
+livepatch: patch '$MOD_LIVEPATCH' failed for module '$MOD_TARGET', refusing to load module '$MOD_TARGET'
+modprobe: ERROR: could not insert '$MOD_TARGET': No such device
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
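+
+# As the expected output above shows, the trap set by set_pre_patch_ret goes
+# through the demo module's standard parameter interface; the equivalent
+# manual step is:
+#
+#   echo -19 > /sys/module/$MOD_LIVEPATCH/parameters/pre_patch_ret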
+
+
+# TEST: multiple target modules
+#
+# Test loading multiple targeted kernel modules. This test-case is
+# mainly for comparing with the next test-case.
+#
+# - Load a target "busy" kernel module which kicks off a worker function
+# that immediately exits.
+#
+# - Proceed with loading the livepatch and another ordinary target
+# module. Post-patch callbacks are executed and the transition
+# completes quickly.
+
+echo -n "TEST: multiple target modules ... "
+dmesg -C
+
+load_mod $MOD_TARGET_BUSY sleep_secs=0
+# give $MOD_TARGET_BUSY::busymod_work_func() a chance to run
+sleep 5
+load_lp $MOD_LIVEPATCH
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET_BUSY
+
+check_result "% modprobe $MOD_TARGET_BUSY sleep_secs=0
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
+$MOD_TARGET_BUSY: busymod_work_func, sleeping 0 seconds ...
+$MOD_TARGET_BUSY: busymod_work_func exit
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_LIVEPATCH: post_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: pre_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET_BUSY
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_exit"
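+
+# Note the module states reported above: a target module that was already
+# loaded is patched in MODULE_STATE_LIVE, while a module loaded after the
+# livepatch has its callbacks run in MODULE_STATE_COMING, before its
+# module_init() message appears.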
+
+
+# TEST: busy target module
+#
+# Similar to the previous test, but force the "busy" kernel module to do
+# longer work.
+#
+# The livepatching core will refuse to patch a task that is currently
+# executing a to-be-patched function -- the consistency model stalls the
+# current patch transition until this safety-check is met. Test a
+# scenario where one of a livepatch's target klp_objects sits on such a
+# function for a long time. Meanwhile, load and unload other target
+# kernel modules while the livepatch transition is in progress.
+#
+# - Load the "busy" kernel module, this time make it do 10 seconds worth
+# of work.
+#
+# - Meanwhile, the livepatch is loaded. Notice that the patch
+# transition does not complete as the targeted "busy" module is
+# sitting on a to-be-patched function.
+#
+# - Load a second target module (this one is an ordinary idle kernel
+# module). Note that *no* post-patch callbacks will be executed while
+# the livepatch is still in transition.
+#
+# - Request an unload of the simple kernel module. The patch is still
+# transitioning, so its pre-unpatch callbacks are skipped.
+#
+# - Finally the livepatch is disabled.  Since none of the patch's
+#   klp_objects had their post-patch callbacks executed, the remaining
+#   klp_objects' pre-unpatch callbacks are skipped.
+
+echo -n "TEST: busy target module ... "
+dmesg -C
+
+load_mod $MOD_TARGET_BUSY sleep_secs=10
+load_lp_nowait $MOD_LIVEPATCH
+# Don't wait for transition, load $MOD_TARGET while the transition
+# is still stalled in $MOD_TARGET_BUSY::busymod_work_func()
+sleep 5
+load_mod $MOD_TARGET
+unload_mod $MOD_TARGET
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+unload_mod $MOD_TARGET_BUSY
+
+check_result "% modprobe $MOD_TARGET_BUSY sleep_secs=10
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_init
+$MOD_TARGET_BUSY: busymod_work_func, sleeping 10 seconds ...
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+% modprobe $MOD_TARGET
+livepatch: applying patch '$MOD_LIVEPATCH' to loading module '$MOD_TARGET'
+$MOD_LIVEPATCH: pre_patch_callback: $MOD_TARGET -> [MODULE_STATE_COMING] Full formed, running module_init
+$MOD_TARGET: ${MOD_TARGET}_init
+% rmmod $MOD_TARGET
+$MOD_TARGET: ${MOD_TARGET}_exit
+livepatch: reverting patch '$MOD_LIVEPATCH' on unloading module '$MOD_TARGET'
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET -> [MODULE_STATE_GOING] Going away
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': reversing transition from patching to unpatching
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+$MOD_LIVEPATCH: post_unpatch_callback: $MOD_TARGET_BUSY -> [MODULE_STATE_LIVE] Normal state
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH
+% rmmod $MOD_TARGET_BUSY
+$MOD_TARGET_BUSY: busymod_work_func exit
+$MOD_TARGET_BUSY: ${MOD_TARGET_BUSY}_exit"
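+
+# While the transition is stalled in busymod_work_func(), the pending state
+# can be observed by hand (a sketch; assumes a kernel that exposes the
+# per-patch 'transition' attribute):
+#
+#   cat /sys/kernel/livepatch/$MOD_LIVEPATCH/transition   # 1 while in transition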
+
+
+# TEST: multiple livepatches
+#
+# Test loading multiple livepatches. This test-case is mainly for comparing
+# with the next test-case.
+#
+# - Load and unload two livepatches; pre and post (un)patch callbacks
+# execute as each patch progresses through its (un)patching
+# transition.
+
+echo -n "TEST: multiple livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH2
+disable_lp $MOD_LIVEPATCH2
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_LIVEPATCH2
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+$MOD_LIVEPATCH: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+$MOD_LIVEPATCH: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: atomic replace
+#
+# Load multiple livepatches, but the second as an 'atomic-replace'
+# patch. When the latter loads, the original livepatch should be
+# disabled and *none* of its pre/post-unpatch callbacks executed. On
+# the other hand, when the atomic-replace livepatch is disabled, its
+# pre/post-unpatch callbacks *should* be executed.
+#
+# - Load and unload two livepatches, the second of which has its
+# .replace flag set true.
+#
+# - Pre and post patch callbacks are executed for both livepatches.
+#
+# - Once the atomic-replace module is loaded, only its own pre/post-unpatch
+#   callbacks are executed when it is later disabled; the original
+#   livepatch's unpatch callbacks are skipped.
+
+echo -n "TEST: atomic replace ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+load_lp $MOD_LIVEPATCH2 replace=1
+disable_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH2
+unload_lp $MOD_LIVEPATCH
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+$MOD_LIVEPATCH: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+$MOD_LIVEPATCH: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH': patching complete
+% modprobe $MOD_LIVEPATCH2 replace=1
+livepatch: enabling patch '$MOD_LIVEPATCH2'
+livepatch: '$MOD_LIVEPATCH2': initializing patching transition
+$MOD_LIVEPATCH2: pre_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting patching transition
+livepatch: '$MOD_LIVEPATCH2': completing patching transition
+$MOD_LIVEPATCH2: post_patch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH2/enabled
+livepatch: '$MOD_LIVEPATCH2': initializing unpatching transition
+$MOD_LIVEPATCH2: pre_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH2': completing unpatching transition
+$MOD_LIVEPATCH2: post_unpatch_callback: vmlinux
+livepatch: '$MOD_LIVEPATCH2': unpatching complete
+% rmmod $MOD_LIVEPATCH2
+% rmmod $MOD_LIVEPATCH"
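+
+# Note that the replaced patch is removed above with a plain rmmod; no
+# "echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled" appears in the
+# expected output because the atomic-replace transition already left the
+# original patch disabled, with its unpatch callbacks skipped.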
+
+
+exit 0
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
new file mode 100755
index 000000000000..f05268aea859
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_livepatch
+MOD_REPLACE=test_klp_atomic_replace
+
+set_dynamic_debug
+
+
+# TEST: basic function patching
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - unload the livepatch and make sure the patch was removed
+
+echo -n "TEST: basic function patching ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+if [[ "$(cat /proc/cmdline)" == "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
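+
+# For reference, the disable_lp/unload_lp steps above reduce to the livepatch
+# sysfs interface plus rmmod; a minimal manual sketch using the same
+# attribute written in the expected output:
+#
+#   cat /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled    # 1 while patched
+#   echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+#   rmmod $MOD_LIVEPATCH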
+
+
+# TEST: multiple livepatches
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - load another livepatch and verify that both livepatches are active
+# - unload the second livepatch and verify that the first is still active
+# - unload the first livepatch and verify none are active
+
+echo -n "TEST: multiple livepatches ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
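+# Redirecting the grep output to /dev/kmsg pushes any "live patched" strings
+# from the patched /proc files into the kernel log, where check_result can
+# match them against the expected output below.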
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+load_lp $MOD_REPLACE replace=0
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_REPLACE
+unload_lp $MOD_REPLACE
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+$MOD_LIVEPATCH: this has been live patched
+% modprobe $MOD_REPLACE replace=0
+livepatch: enabling patch '$MOD_REPLACE'
+livepatch: '$MOD_REPLACE': initializing patching transition
+livepatch: '$MOD_REPLACE': starting patching transition
+livepatch: '$MOD_REPLACE': completing patching transition
+livepatch: '$MOD_REPLACE': patching complete
+$MOD_LIVEPATCH: this has been live patched
+$MOD_REPLACE: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
+livepatch: '$MOD_REPLACE': initializing unpatching transition
+livepatch: '$MOD_REPLACE': starting unpatching transition
+livepatch: '$MOD_REPLACE': completing unpatching transition
+livepatch: '$MOD_REPLACE': unpatching complete
+% rmmod $MOD_REPLACE
+$MOD_LIVEPATCH: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+# TEST: atomic replace livepatch
+# - load a livepatch that modifies the output from /proc/cmdline and
+# verify correct behavior
+# - load an atomic replace livepatch and verify that only the second is active
+# - remove the first livepatch and verify that the atomic replace livepatch
+# is still active
+# - remove the atomic replace livepatch and verify that none are active
+
+echo -n "TEST: atomic replace livepatch ... "
+dmesg -C
+
+load_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+load_lp $MOD_REPLACE replace=1
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+unload_lp $MOD_LIVEPATCH
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+disable_lp $MOD_REPLACE
+unload_lp $MOD_REPLACE
+
+grep 'live patched' /proc/cmdline > /dev/kmsg
+grep 'live patched' /proc/meminfo > /dev/kmsg
+
+check_result "% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+$MOD_LIVEPATCH: this has been live patched
+% modprobe $MOD_REPLACE replace=1
+livepatch: enabling patch '$MOD_REPLACE'
+livepatch: '$MOD_REPLACE': initializing patching transition
+livepatch: '$MOD_REPLACE': starting patching transition
+livepatch: '$MOD_REPLACE': completing patching transition
+livepatch: '$MOD_REPLACE': patching complete
+$MOD_REPLACE: this has been live patched
+% rmmod $MOD_LIVEPATCH
+$MOD_REPLACE: this has been live patched
+% echo 0 > /sys/kernel/livepatch/$MOD_REPLACE/enabled
+livepatch: '$MOD_REPLACE': initializing unpatching transition
+livepatch: '$MOD_REPLACE': starting unpatching transition
+livepatch: '$MOD_REPLACE': completing unpatching transition
+livepatch: '$MOD_REPLACE': unpatching complete
+% rmmod $MOD_REPLACE"
+
+
+exit 0
diff --git a/tools/testing/selftests/livepatch/test-shadow-vars.sh b/tools/testing/selftests/livepatch/test-shadow-vars.sh
new file mode 100755
index 000000000000..04a37831e204
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-shadow-vars.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_TEST=test_klp_shadow_vars
+
+set_dynamic_debug
+
+
+# TEST: basic shadow variable API
+# - load a module that exercises the shadow variable API
+
+echo -n "TEST: basic shadow variable API ... "
+dmesg -C
+
+load_mod $MOD_TEST
+unload_mod $MOD_TEST
+
+check_result "% modprobe $MOD_TEST
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_ctor: PTR6 -> PTR1
+$MOD_TEST: klp_shadow_alloc(obj=PTR5, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR1 = PTR6
+$MOD_TEST: shadow_ctor: PTR8 -> PTR2
+$MOD_TEST: klp_shadow_alloc(obj=PTR9, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR2 = PTR8
+$MOD_TEST: shadow_ctor: PTR10 -> PTR3
+$MOD_TEST: klp_shadow_alloc(obj=PTR5, id=0x1235, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR3 = PTR10
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR6
+$MOD_TEST: got expected PTR6 -> PTR1 result
+$MOD_TEST: klp_shadow_get(obj=PTR9, id=0x1234) = PTR8
+$MOD_TEST: got expected PTR8 -> PTR2 result
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1235) = PTR10
+$MOD_TEST: got expected PTR10 -> PTR3 result
+$MOD_TEST: shadow_ctor: PTR11 -> PTR4
+$MOD_TEST: klp_shadow_get_or_alloc(obj=PTR12, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR4 = PTR11
+$MOD_TEST: klp_shadow_get_or_alloc(obj=PTR12, id=0x1234, size=8, gfp_flags=GFP_KERNEL), ctor=PTR7, ctor_data=PTR4 = PTR11
+$MOD_TEST: got expected PTR11 -> PTR4 result
+$MOD_TEST: shadow_dtor(obj=PTR5, shadow_data=PTR6)
+$MOD_TEST: klp_shadow_free(obj=PTR5, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_dtor(obj=PTR9, shadow_data=PTR8)
+$MOD_TEST: klp_shadow_free(obj=PTR9, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR9, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: shadow_dtor(obj=PTR12, shadow_data=PTR11)
+$MOD_TEST: klp_shadow_free(obj=PTR12, id=0x1234, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR12, id=0x1234) = PTR0
+$MOD_TEST: got expected NULL result
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1235) = PTR10
+$MOD_TEST: got expected PTR10 -> PTR3 result
+$MOD_TEST: shadow_dtor(obj=PTR5, shadow_data=PTR10)
+$MOD_TEST: klp_shadow_free_all(id=0x1235, dtor=PTR13)
+$MOD_TEST: klp_shadow_get(obj=PTR5, id=0x1234) = PTR0
+$MOD_TEST: shadow_get() got expected NULL result
+% rmmod test_klp_shadow_vars"
+
+exit 0
diff --git a/tools/testing/selftests/media_tests/media_dev_allocator.sh b/tools/testing/selftests/media_tests/media_dev_allocator.sh
new file mode 100755
index 000000000000..ffe00c59a483
--- /dev/null
+++ b/tools/testing/selftests/media_tests/media_dev_allocator.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Media Device Allocator API test script
+# Copyright (c) 2019 Shuah Khan <shuah@kernel.org>
+
+echo "Media Device Allocator testing: unbind and bind"
+echo "media driver $1 audio driver $2"
+
+MDRIVER=/sys/bus/usb/drivers/$1
+cd $MDRIVER
+MDEV=$(ls -d *\-*)
+
+ADRIVER=/sys/bus/usb/drivers/$2
+cd $ADRIVER
+ADEV=$(ls -d *\-*.1)
+
+echo "=================================="
+echo "Test unbind both devices - start"
+echo "Running unbind of $MDEV from $MDRIVER"
+echo $MDEV > $MDRIVER/unbind;
+
+echo "Media device should still be present!"
+ls -l /dev/media*
+
+echo "sound driver is at: $ADRIVER"
+echo "Device is: $ADEV"
+
+echo "Running unbind of $ADEV from $ADRIVER"
+echo $ADEV > $ADRIVER/unbind;
+
+echo "Media device should have been deleted!"
+ls -l /dev/media*
+echo "Test unbind both devices - end"
+
+echo "=================================="
+
+echo "Test bind both devices - start"
+echo "Running bind of $MDEV from $MDRIVER"
+echo $MDEV > $MDRIVER/bind;
+
+echo "Media device should be present!"
+ls -l /dev/media*
+
+echo "Running bind of $ADEV from $ADRIVER"
+echo $ADEV > $ADRIVER/bind;
+
+echo "Media device should be there!"
+ls -l /dev/media*
+
+echo "Test bind both devices - end"
+
+echo "=================================="
+
+echo "Test unbind $MDEV - bind $MDEV - unbind $ADEV - bind $ADEV start"
+
+echo "Running unbind of $MDEV from $MDRIVER"
+echo $MDEV > $MDRIVER/unbind;
+
+echo "Media device should be there!"
+ls -l /dev/media*
+
+sleep 1
+
+echo "Running bind of $MDEV from $MDRIVER"
+echo $MDEV > $MDRIVER/bind;
+
+echo "Media device should be there!"
+ls -l /dev/media*
+
+echo "Running unbind of $ADEV from $ADRIVER"
+echo $ADEV > $ADRIVER/unbind;
+
+echo "Media device should be there!"
+ls -l /dev/media*
+
+sleep 1
+
+echo "Running bind of $ADEV from $ADRIVER"
+echo $ADEV > $ADRIVER/bind;
+
+echo "Media device should be there!"
+ls -l /dev/media*
+
+echo "Test unbind $MDEV - bind $MDEV - unbind $ADEV - bind $ADEV end"
+echo "=================================="
diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
index 02845532b059..97e3bdf3d1e9 100644
--- a/tools/testing/selftests/membarrier/Makefile
+++ b/tools/testing/selftests/membarrier/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -g -I../../../../usr/include/
TEST_GEN_PROGS := membarrier_test
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
index 6793f8ecc8e7..70b4ddbf126b 100644
--- a/tools/testing/selftests/membarrier/membarrier_test.c
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -304,6 +304,7 @@ static int test_membarrier_query(void)
int main(int argc, char **argv)
{
ksft_print_header();
+ ksft_set_plan(13);
test_membarrier_query();
test_membarrier();
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 10baa1652fc2..c67d32eeb668 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -54,6 +54,22 @@ static int mfd_assert_new(const char *name, loff_t sz, unsigned int flags)
return fd;
}
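+/*
+ * Re-open an existing memfd via procfs.  The returned fd refers to a brand
+ * new open file description, which lets test_seal_future_write() verify that
+ * F_SEAL_FUTURE_WRITE also blocks writes through fds opened after the seal
+ * was added.
+ */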
+static int mfd_assert_reopen_fd(int fd_in)
+{
+ int r, fd;
+ char path[100];
+
+ sprintf(path, "/proc/self/fd/%d", fd_in);
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ printf("re-open of existing fd %d failed\n", fd_in);
+ abort();
+ }
+
+ return fd;
+}
+
static void mfd_fail_new(const char *name, unsigned int flags)
{
int r;
@@ -255,6 +271,25 @@ static void mfd_assert_read(int fd)
munmap(p, mfd_def_size);
}
+/* Test that PROT_READ + MAP_SHARED mappings work. */
+static void mfd_assert_read_shared(int fd)
+{
+ void *p;
+
+ /* verify PROT_READ and MAP_SHARED *is* allowed */
+ p = mmap(NULL,
+ mfd_def_size,
+ PROT_READ,
+ MAP_SHARED,
+ fd,
+ 0);
+ if (p == MAP_FAILED) {
+ printf("mmap() failed: %m\n");
+ abort();
+ }
+ munmap(p, mfd_def_size);
+}
+
static void mfd_assert_write(int fd)
{
ssize_t l;
@@ -693,6 +728,44 @@ static void test_seal_write(void)
}
/*
+ * Test SEAL_FUTURE_WRITE
+ * Test whether SEAL_FUTURE_WRITE actually prevents modifications.
+ */
+static void test_seal_future_write(void)
+{
+ int fd, fd2;
+ void *p;
+
+ printf("%s SEAL-FUTURE-WRITE\n", memfd_str);
+
+ fd = mfd_assert_new("kern_memfd_seal_future_write",
+ mfd_def_size,
+ MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
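+	/*
+	 * Map the memfd shared before sealing; unlike F_SEAL_WRITE,
+	 * F_SEAL_FUTURE_WRITE can still be added while this mapping exists.
+	 */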
+ p = mfd_assert_mmap_shared(fd);
+
+ mfd_assert_has_seals(fd, 0);
+
+ mfd_assert_add_seals(fd, F_SEAL_FUTURE_WRITE);
+ mfd_assert_has_seals(fd, F_SEAL_FUTURE_WRITE);
+
+ /* read should pass, writes should fail */
+ mfd_assert_read(fd);
+ mfd_assert_read_shared(fd);
+ mfd_fail_write(fd);
+
+ fd2 = mfd_assert_reopen_fd(fd);
+ /* read should pass, writes should still fail */
+ mfd_assert_read(fd2);
+ mfd_assert_read_shared(fd2);
+ mfd_fail_write(fd2);
+
+ munmap(p, mfd_def_size);
+ close(fd2);
+ close(fd);
+}
+
+/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
*/
@@ -945,6 +1018,7 @@ int main(int argc, char **argv)
test_basic();
test_seal_write();
+ test_seal_future_write();
test_seal_shrink();
test_seal_grow();
test_seal_resize();
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 5821bdd98d20..474040448601 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -7,9 +7,7 @@ CONFIG_NET_L3_MASTER_DEV=y
CONFIG_IPV6=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_VETH=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_NET_IPVTI=y
-CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_IPV6_VTI=y
CONFIG_DUMMY=y
CONFIG_BRIDGE=y
@@ -17,8 +15,7 @@ CONFIG_VLAN_8021Q=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_NAT_IPV6=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_NF_NAT=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP6_NF_NAT=m
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index d4cfb6a7a086..a93e6b690e06 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -15,6 +15,7 @@ GW_IP6=2001:db8:1::2
SRC_IP6=2001:db8:1::3
DEV_ADDR=192.51.100.1
+DEV_ADDR6=2001:db8:1::1
DEV=dummy0
log_test()
@@ -27,6 +28,7 @@ log_test()
nsuccess=$((nsuccess+1))
printf "\n TEST: %-50s [ OK ]\n" "${msg}"
else
+ ret=1
nfail=$((nfail+1))
printf "\n TEST: %-50s [FAIL]\n" "${msg}"
if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
@@ -54,8 +56,8 @@ setup()
$IP link add dummy0 type dummy
$IP link set dev dummy0 up
- $IP address add 198.51.100.1/24 dev dummy0
- $IP -6 address add 2001:db8:1::1/64 dev dummy0
+ $IP address add $DEV_ADDR/24 dev dummy0
+ $IP -6 address add $DEV_ADDR6/64 dev dummy0
set +e
}
@@ -147,8 +149,8 @@ fib_rule6_test()
fib_check_iproute_support "ipproto" "ipproto"
if [ $? -eq 0 ]; then
- match="ipproto icmp"
- fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match"
+ match="ipproto ipv6-icmp"
+ fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
fi
}
@@ -185,8 +187,13 @@ fib_rule4_test()
match="oif $DEV"
fib_rule4_test_match_n_redirect "$match" "$match" "oif redirect to table"
+	# need to enable forwarding and disable rp_filter temporarily as all the
+ # addresses are in the same subnet and egress device == ingress device.
+ ip netns exec testns sysctl -w net.ipv4.ip_forward=1
+ ip netns exec testns sysctl -w net.ipv4.conf.$DEV.rp_filter=0
match="from $SRC_IP iif $DEV"
fib_rule4_test_match_n_redirect "$match" "$match" "iif redirect to table"
+ ip netns exec testns sysctl -w net.ipv4.ip_forward=0
match="tos 0x10"
fib_rule4_test_match_n_redirect "$match" "$match" "tos redirect to table"
@@ -245,4 +252,9 @@ setup
run_fibrule_tests
cleanup
+if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+fi
+
exit $ret
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 1080ff55a788..9457aaeae092 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -9,7 +9,8 @@ ret=0
ksft_skip=4
# all tests in this script. Can be overridden with -t option
-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics"
+TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw"
+
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
@@ -48,6 +49,7 @@ setup()
{
set -e
ip netns add ns1
+ ip netns set ns1 auto
$IP link set dev lo up
ip netns exec ns1 sysctl -qw net.ipv4.ip_forward=1
ip netns exec ns1 sysctl -qw net.ipv6.conf.all.forwarding=1
@@ -605,6 +607,39 @@ run_cmd()
return $rc
}
+check_expected()
+{
+ local out="$1"
+ local expected="$2"
+ local rc=0
+
+ [ "${out}" = "${expected}" ] && return 0
+
+ if [ -z "${out}" ]; then
+ if [ "$VERBOSE" = "1" ]; then
+ printf "\nNo route entry found\n"
+ printf "Expected:\n"
+ printf " ${expected}\n"
+ fi
+ return 1
+ fi
+
+ # tricky way to convert output to 1-line without ip's
+ # messy '\'; this drops all extra white space
+ out=$(echo ${out})
+ if [ "${out}" != "${expected}" ]; then
+ rc=1
+ if [ "${VERBOSE}" = "1" ]; then
+ printf " Unexpected route entry. Have:\n"
+ printf " ${out}\n"
+ printf " Expected:\n"
+ printf " ${expected}\n\n"
+ fi
+ fi
+
+ return $rc
+}
+
# add route for a prefix, flushing any existing routes first
# expected to be the first step of a test
add_route6()
@@ -652,31 +687,7 @@ check_route6()
pfx=$1
out=$($IP -6 ro ls match ${pfx} | sed -e 's/ pref medium//')
- [ "${out}" = "${expected}" ] && return 0
-
- if [ -z "${out}" ]; then
- if [ "$VERBOSE" = "1" ]; then
- printf "\nNo route entry found\n"
- printf "Expected:\n"
- printf " ${expected}\n"
- fi
- return 1
- fi
-
- # tricky way to convert output to 1-line without ip's
- # messy '\'; this drops all extra white space
- out=$(echo ${out})
- if [ "${out}" != "${expected}" ]; then
- rc=1
- if [ "${VERBOSE}" = "1" ]; then
- printf " Unexpected route entry. Have:\n"
- printf " ${out}\n"
- printf " Expected:\n"
- printf " ${expected}\n\n"
- fi
- fi
-
- return $rc
+ check_expected "${out}" "${expected}"
}
route_cleanup()
@@ -698,6 +709,7 @@ route_setup()
set -e
ip netns add ns2
+ ip netns set ns2 auto
ip -netns ns2 link set dev lo up
ip netns exec ns2 sysctl -qw net.ipv4.ip_forward=1
ip netns exec ns2 sysctl -qw net.ipv6.conf.all.forwarding=1
@@ -725,7 +737,7 @@ route_setup()
ip -netns ns2 addr add 172.16.103.2/24 dev veth4
ip -netns ns2 addr add 172.16.104.1/24 dev dummy1
- set +ex
+ set +e
}
# assumption is that basic add of a single path route works
@@ -960,7 +972,8 @@ ipv6_addr_metric_test()
run_cmd "$IP li set dev dummy2 down"
rc=$?
if [ $rc -eq 0 ]; then
- check_route6 ""
+ out=$($IP -6 ro ls match 2001:db8:104::/64)
+ check_expected "${out}" ""
rc=$?
fi
log_test $rc 0 "Prefix route removed on link down"
@@ -1091,38 +1104,13 @@ check_route()
local pfx
local expected="$1"
local out
- local rc=0
set -- $expected
pfx=$1
[ "${pfx}" = "unreachable" ] && pfx=$2
out=$($IP ro ls match ${pfx})
- [ "${out}" = "${expected}" ] && return 0
-
- if [ -z "${out}" ]; then
- if [ "$VERBOSE" = "1" ]; then
- printf "\nNo route entry found\n"
- printf "Expected:\n"
- printf " ${expected}\n"
- fi
- return 1
- fi
-
- # tricky way to convert output to 1-line without ip's
- # messy '\'; this drops all extra white space
- out=$(echo ${out})
- if [ "${out}" != "${expected}" ]; then
- rc=1
- if [ "${VERBOSE}" = "1" ]; then
- printf " Unexpected route entry. Have:\n"
- printf " ${out}\n"
- printf " Expected:\n"
- printf " ${expected}\n\n"
- fi
- fi
-
- return $rc
+ check_expected "${out}" "${expected}"
}
# assumption is that basic add of a single path route works
@@ -1387,7 +1375,8 @@ ipv4_addr_metric_test()
run_cmd "$IP li set dev dummy2 down"
rc=$?
if [ $rc -eq 0 ]; then
- check_route ""
+ out=$($IP ro ls match 172.16.104.0/24)
+ check_expected "${out}" ""
rc=$?
fi
log_test $rc 0 "Prefix route removed on link down"
@@ -1442,6 +1431,70 @@ ipv4_route_metrics_test()
route_cleanup
}
+ipv4_route_v6_gw_test()
+{
+ local rc
+
+ echo
+ echo "IPv4 route with IPv6 gateway tests"
+
+ route_setup
+ sleep 2
+
+ #
+ # single path route
+ #
+ run_cmd "$IP ro add 172.16.104.0/24 via inet6 2001:db8:101::2"
+ rc=$?
+ log_test $rc 0 "Single path route with IPv6 gateway"
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.0/24 via inet6 2001:db8:101::2 dev veth1"
+ fi
+
+ run_cmd "ip netns exec ns1 ping -w1 -c1 172.16.104.1"
+ log_test $rc 0 "Single path route with IPv6 gateway - ping"
+
+ run_cmd "$IP ro del 172.16.104.0/24 via inet6 2001:db8:101::2"
+ rc=$?
+ log_test $rc 0 "Single path route delete"
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.112.0/24"
+ fi
+
+ #
+ # multipath - v6 then v4
+ #
+ run_cmd "$IP ro add 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+ rc=$?
+ log_test $rc 0 "Multipath route add - v6 nexthop then v4"
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 weight 1 nexthop via 172.16.103.2 dev veth3 weight 1"
+ fi
+
+ run_cmd "$IP ro del 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+ log_test $? 2 " Multipath route delete - nexthops in wrong order"
+
+ run_cmd "$IP ro del 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+ log_test $? 0 " Multipath route delete exact match"
+
+ #
+ # multipath - v4 then v6
+ #
+ run_cmd "$IP ro add 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+ rc=$?
+ log_test $rc 0 "Multipath route add - v4 nexthop then v6"
+ if [ $rc -eq 0 ]; then
+ check_route "172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 weight 1 nexthop via inet6 2001:db8:101::2 dev veth1 weight 1"
+ fi
+
+ run_cmd "$IP ro del 172.16.104.0/24 nexthop via inet6 2001:db8:101::2 dev veth1 nexthop via 172.16.103.2 dev veth3"
+ log_test $? 2 " Multipath route delete - nexthops in wrong order"
+
+ run_cmd "$IP ro del 172.16.104.0/24 nexthop via 172.16.103.2 dev veth3 nexthop via inet6 2001:db8:101::2 dev veth1"
+ log_test $? 0 " Multipath route delete exact match"
+
+ route_cleanup
+}
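+
+# The new case can be run on its own through the script's existing -t
+# handling, e.g.:
+#
+#   ./fib_tests.sh -t ipv4_route_v6_gw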
################################################################################
# usage
@@ -1511,6 +1564,7 @@ do
ipv4_addr_metric) ipv4_addr_metric_test;;
ipv6_route_metrics) ipv6_route_metrics_test;;
ipv4_route_metrics) ipv4_route_metrics_test;;
+ ipv4_route_v6_gw) ipv4_route_v6_gw_test;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
new file mode 100755
index 000000000000..88d2472ba151
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="reportleave_test"
+NUM_NETIFS=4
+CHECK_TC="yes"
+TEST_GROUP="239.10.10.10"
+TEST_GROUP_MAC="01:00:5e:0a:0a:0a"
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ ip link add dev br0 type bridge mcast_snooping 1 mcast_querier 1
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+	# Always clean up the mcast group
+	ip address del dev $h2 $TEST_GROUP/32 &> /dev/null
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+# return 0 if the packet wasn't seen on host2_if or 1 if it was
+mcast_packet_test()
+{
+ local mac=$1
+ local ip=$2
+ local host1_if=$3
+ local host2_if=$4
+ local seen=0
+
+ # Add an ACL on `host2_if` which will tell us whether the packet
+ # was received by it or not.
+ tc qdisc add dev $host2_if ingress
+ tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \
+ flower dst_mac $mac action drop
+
+ $MZ $host1_if -c 1 -p 64 -b $mac -B $ip -t udp "dp=4096,sp=2048" -q
+ sleep 1
+
+ tc -j -s filter show dev $host2_if ingress \
+ | jq -e ".[] | select(.options.handle == 101) \
+ | select(.options.actions[0].stats.packets == 1)" &> /dev/null
+ if [[ $? -eq 0 ]]; then
+ seen=1
+ fi
+
+ tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower
+ tc qdisc del dev $host2_if ingress
+
+ return $seen
+}
+
+reportleave_test()
+{
+ RET=0
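+	# "autojoin" makes the kernel join the multicast group for the added
+	# address, i.e. send the IGMP report this test expects the bridge to
+	# snoop.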
+ ip address add dev $h2 $TEST_GROUP/32 autojoin
+ check_err $? "Could not join $TEST_GROUP"
+
+ sleep 5
+ bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+ check_err $? "Report didn't create mdb entry for $TEST_GROUP"
+
+ mcast_packet_test $TEST_GROUP_MAC $TEST_GROUP $h1 $h2
+ check_fail $? "Traffic to $TEST_GROUP wasn't forwarded"
+
+ log_test "IGMP report $TEST_GROUP"
+
+ RET=0
+ bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+ check_err $? "mdb entry for $TEST_GROUP is missing"
+
+ ip address del dev $h2 $TEST_GROUP/32
+ check_err $? "Could not leave $TEST_GROUP"
+
+ sleep 5
+ bridge mdb show dev br0 | grep $TEST_GROUP 1>/dev/null
+ check_fail $? "Leave didn't delete mdb entry for $TEST_GROUP"
+
+ mcast_packet_test $TEST_GROUP_MAC $TEST_GROUP $h1 $h2
+ check_err $? "Traffic to $TEST_GROUP was forwarded without mdb entry"
+
+ log_test "IGMP leave $TEST_GROUP"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 5cd2aed97958..da96eff72a8e 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -10,3 +10,5 @@ CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
CONFIG_VETH=m
+CONFIG_NAMESPACES=y
+CONFIG_NET_NS=y
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 5ab1e5f43022..8553a67a2322 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -2,21 +2,10 @@
# SPDX-License-Identifier: GPL-2.0
##############################################################################
-# Source library
-
-relative_path="${BASH_SOURCE%/*}"
-if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
- relative_path="."
-fi
-
-source "$relative_path/lib.sh"
-
-##############################################################################
# Defines
-DEVLINK_DEV=$(devlink port show | grep "${NETIFS[p1]}" | \
- grep -v "${NETIFS[p1]}[0-9]" | cut -d" " -f1 | \
- rev | cut -d"/" -f2- | rev)
+DEVLINK_DEV=$(devlink port show "${NETIFS[p1]}" -j \
+ | jq -r '.port | keys[]' | cut -d/ -f-2)
if [ -z "$DEVLINK_DEV" ]; then
echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
exit 1
@@ -32,7 +21,7 @@ DEVLINK_VIDDID=$(lspci -s $(echo $DEVLINK_DEV | cut -d"/" -f2) \
##############################################################################
# Sanity checks
-devlink -j resource show "$DEVLINK_DEV" &> /dev/null
+devlink help 2>&1 | grep resource &> /dev/null
if [ $? -ne 0 ]; then
echo "SKIP: iproute2 too old, missing devlink resource support"
exit 1
@@ -106,3 +95,98 @@ devlink_reload()
grep -c "size_new")
check_err $still_pending "Failed reload - There are still unset sizes"
}
+
+declare -A DEVLINK_ORIG
+
+devlink_port_pool_threshold()
+{
+ local port=$1; shift
+ local pool=$1; shift
+
+ devlink sb port pool show $port pool $pool -j \
+ | jq '.port_pool."'"$port"'"[].threshold'
+}
+
+devlink_port_pool_th_set()
+{
+ local port=$1; shift
+ local pool=$1; shift
+ local th=$1; shift
+ local key="port_pool($port,$pool).threshold"
+
+ DEVLINK_ORIG[$key]=$(devlink_port_pool_threshold $port $pool)
+ devlink sb port pool set $port pool $pool th $th
+}
+
+devlink_port_pool_th_restore()
+{
+ local port=$1; shift
+ local pool=$1; shift
+ local key="port_pool($port,$pool).threshold"
+
+ devlink sb port pool set $port pool $pool th ${DEVLINK_ORIG[$key]}
+}
+
+devlink_pool_size_thtype()
+{
+ local pool=$1; shift
+
+ devlink sb pool show "$DEVLINK_DEV" pool $pool -j \
+ | jq -r '.pool[][] | (.size, .thtype)'
+}
+
+devlink_pool_size_thtype_set()
+{
+ local pool=$1; shift
+ local thtype=$1; shift
+ local size=$1; shift
+ local key="pool($pool).size_thtype"
+
+ DEVLINK_ORIG[$key]=$(devlink_pool_size_thtype $pool)
+ devlink sb pool set "$DEVLINK_DEV" pool $pool size $size thtype $thtype
+}
+
+devlink_pool_size_thtype_restore()
+{
+ local pool=$1; shift
+ local key="pool($pool).size_thtype"
+ local -a orig=(${DEVLINK_ORIG[$key]})
+
+ devlink sb pool set "$DEVLINK_DEV" pool $pool \
+ size ${orig[0]} thtype ${orig[1]}
+}
+
+devlink_tc_bind_pool_th()
+{
+ local port=$1; shift
+ local tc=$1; shift
+ local dir=$1; shift
+
+ devlink sb tc bind show $port tc $tc type $dir -j \
+ | jq -r '.tc_bind[][] | (.pool, .threshold)'
+}
+
+devlink_tc_bind_pool_th_set()
+{
+ local port=$1; shift
+ local tc=$1; shift
+ local dir=$1; shift
+ local pool=$1; shift
+ local th=$1; shift
+ local key="tc_bind($port,$dir,$tc).pool_th"
+
+ DEVLINK_ORIG[$key]=$(devlink_tc_bind_pool_th $port $tc $dir)
+ devlink sb tc bind set $port tc $tc type $dir pool $pool th $th
+}
+
+devlink_tc_bind_pool_th_restore()
+{
+ local port=$1; shift
+ local tc=$1; shift
+ local dir=$1; shift
+ local key="tc_bind($port,$dir,$tc).pool_th"
+ local -a orig=(${DEVLINK_ORIG[$key]})
+
+ devlink sb tc bind set $port tc $tc type $dir \
+ pool ${orig[0]} th ${orig[1]}
+}
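+
+# Hedged usage sketch for the save/restore helpers above (the port handle,
+# pool number and threshold are illustrative only):
+#
+#   port=$DEVLINK_DEV/1
+#   devlink_port_pool_th_set $port 0 16
+#   ... exercise the shared buffer ...
+#   devlink_port_pool_th_restore $port 0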
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index e819d049d9ce..e2adb533c8fc 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -33,3 +33,6 @@ PAUSE_ON_CLEANUP=no
NETIF_TYPE=veth
# Whether to create virtual interfaces (veth) or not
NETIF_CREATE=yes
+# Timeout (in seconds) before ping exits regardless of how many packets have
+# been sent or received
+PING_TIMEOUT=5
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
new file mode 100755
index 000000000000..abb694397b86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel without key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1
+ sw2_flat_create gre $ol2 $ul2
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
new file mode 100755
index 000000000000..c4f373337e48
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with key.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 key 233
+ sw2_flat_create gre $ol2 $ul2 key 233
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
new file mode 100755
index 000000000000..a811130c0627
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_flat_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnel with separate ikey/okey.
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="gre_flat4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_flat_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_flat4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
new file mode 100755
index 000000000000..05c5b3cf2f78
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels without key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1
+ sw2_hierarchical_create gre $ol2 $ul2
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
new file mode 100755
index 000000000000..9b105dbca32a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_key.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with key.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 key 22
+ sw2_hierarchical_create gre $ol2 $ul2 key 22
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with key"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
new file mode 100755
index 000000000000..e275d25bd83a
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_hier_gre_keys.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test IP-in-IP GRE tunnels with separate ikey/okey.
+# This test uses hierarchical topology for IP tunneling tests. See
+# ipip_lib.sh for more details.
+
+ALL_TESTS="gre_hier4 gre_mtu_change"
+
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_hierarchical_create gre $ol1 $ul1 ikey 111 okey 222
+ sw2_hierarchical_create gre $ol2 $ul2 ikey 222 okey 111
+}
+
+gre_hier4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre hierarchical with ikey/okey"
+}
+
+gre_mtu_change()
+{
+ test_mtu_change gre
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ sw2_hierarchical_destroy $ol2 $ul2
+ sw1_hierarchical_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+ vrf_cleanup
+ forwarding_restore
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ipip_lib.sh b/tools/testing/selftests/net/forwarding/ipip_lib.sh
new file mode 100644
index 000000000000..30f36a57bae6
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/ipip_lib.sh
@@ -0,0 +1,349 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Handles creation and destruction of IP-in-IP or GRE tunnels over the given
+# topology. Supports both flat and hierarchical models.
+#
+# Flat Model:
+# Overlay and underlay share the same VRF.
+# SW1 uses the default VRF, so its tunnel has no bound dev.
+# SW2 uses a non-default VRF, so its tunnel has a bound dev.
+# +-------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | SW1 | |
+# | $ol1 + |
+# | 192.0.2.2/28 |
+# | |
+# | + g1a (gre) |
+# | loc=192.0.2.65 |
+# | rem=192.0.2.66 --. |
+# | tos=inherit | |
+# | .------------------' |
+# | | |
+# | v |
+# | + $ul1.111 (vlan) |
+# | | 192.0.2.129/28 |
+# | \ |
+# | \_______ |
+# | | |
+# |VRF default + $ul1 |
+# +------------|------------+
+# |
+# +------------|------------+
+# | SW2 + $ul2 |
+# | _______| |
+# | / |
+# | / |
+# | + $ul2.111 (vlan) |
+# | ^ 192.0.2.130/28 |
+# | | |
+# | | |
+# | '------------------. |
+# | + g2a (gre) | |
+# | loc=192.0.2.66 | |
+# | rem=192.0.2.65 --' |
+# | tos=inherit |
+# | |
+# | $ol2 + |
+# | 192.0.2.17/28 | |
+# | VRF v$ol2 | |
+# +-------------------|-----+
+# |
+# +-------------------|-----+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +-------------------------+
+#
+# Hierarchical model:
+# The tunnel is bound to a device in a different VRF
+#
+# +---------------------------+
+# | H1 |
+# | $h1 + |
+# | 192.0.2.1/28 | |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | SW1 | |
+# | +-----------------|-----+ |
+# | | $ol1 + | |
+# | | 192.0.2.2/28 | |
+# | | | |
+# | | + g1a (gre) | |
+# | | rem=192.0.2.66 | |
+# | | tos=inherit | |
+# | | loc=192.0.2.65 | |
+# | | ^ | |
+# | | VRF v$ol1 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ul1 | | |
+# | | | | |
+# | | | | |
+# | | v | |
+# | | dummy1 + | |
+# | | 192.0.2.65 | |
+# | | .-------' | |
+# | | | | |
+# | | v | |
+# | | + $ul1.111 (vlan) | |
+# | | | 192.0.2.129/28 | |
+# | | \ | |
+# | | \_____ | |
+# | | | | |
+# | | + $ul1 | |
+# | +----------|------------+ |
+# +------------|--------------+
+# |
+# +------------|--------------+
+# | SW2 | |
+# | +----------|------------+ |
+# | | + $ul2 | |
+# | | _____| | |
+# | | / | |
+# | | / | |
+# | | | $ul2.111 (vlan) | |
+# | | + 192.0.2.130/28 | |
+# | | ^ | |
+# | | | | |
+# | | '-------. | |
+# | | dummy2 + | |
+# | | 192.0.2.66 | |
+# | | ^ | |
+# | | | | |
+# | | | | |
+# | | VRF v$ul2 | | |
+# | +-----------|-----------+ |
+# | | |
+# | +-----------|-----------+ |
+# | | VRF v$ol2 | | |
+# | | | | |
+# | | v | |
+# | | g2a (gre)+ | |
+# | | loc=192.0.2.66 | |
+# | | rem=192.0.2.65 | |
+# | | tos=inherit | |
+# | | | |
+# | | $ol2 + | |
+# | | 192.0.2.17/28 | | |
+# | +-----------------|-----+ |
+# +-------------------|-------+
+# |
+# +-------------------|-------+
+# | H2 | |
+# | $h2 + |
+# | 192.0.2.18/28 |
+# +---------------------------+
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ ip route add vrf v$h1 192.0.2.16/28 via 192.0.2.2
+}
+
+h1_destroy()
+{
+ ip route del vrf v$h1 192.0.2.16/28 via 192.0.2.2
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.18/28
+ ip route add vrf v$h2 192.0.2.0/28 via 192.0.2.17
+}
+
+h2_destroy()
+{
+ ip route del vrf v$h2 192.0.2.0/28 via 192.0.2.17
+ simple_if_fini $h2 192.0.2.18/28
+}
+
+sw1_flat_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip link set dev $ol1 up
+ __addr_add_del $ol1 add "192.0.2.2/28"
+
+ ip link set dev $ul1 up
+ vlan_create $ul1 111 "" 192.0.2.129/28
+
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit "$@"
+ ip link set dev g1a up
+ __addr_add_del g1a add "192.0.2.65/32"
+
+ ip route add 192.0.2.66/32 via 192.0.2.130
+
+ ip route add 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_flat_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del 192.0.2.16/28
+
+ ip route del 192.0.2.66/32 via 192.0.2.130
+ __simple_if_fini g1a 192.0.2.65/32
+ tunnel_destroy g1a
+
+ vlan_destroy $ul1 111
+ __simple_if_fini $ul1
+ __simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_flat_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ __simple_if_init $ul2 v$ol2
+ vlan_create $ul2 111 v$ol2 192.0.2.130/28
+
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev v$ol2 \
+ "$@"
+ __simple_if_init g2a v$ol2 192.0.2.66/32
+
+ ip route add vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_flat_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.65/32 via 192.0.2.129
+ __simple_if_fini g2a 192.0.2.66/32
+ tunnel_destroy g2a
+
+ vlan_destroy $ul2 111
+ __simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+sw1_hierarchical_create()
+{
+ local type=$1; shift
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ simple_if_init $ol1 192.0.2.2/28
+ simple_if_init $ul1
+ ip link add name dummy1 type dummy
+ __simple_if_init dummy1 v$ul1 192.0.2.65/32
+
+ vlan_create $ul1 111 v$ul1 192.0.2.129/28
+ tunnel_create g1a $type 192.0.2.65 192.0.2.66 tos inherit dev dummy1 \
+ "$@"
+ ip link set dev g1a master v$ol1
+
+ ip route add vrf v$ul1 192.0.2.66/32 via 192.0.2.130
+ ip route add vrf v$ol1 192.0.2.16/28 nexthop dev g1a
+}
+
+sw1_hierarchical_destroy()
+{
+ local ol1=$1; shift
+ local ul1=$1; shift
+
+ ip route del vrf v$ol1 192.0.2.16/28
+ ip route del vrf v$ul1 192.0.2.66/32
+
+ tunnel_destroy g1a
+ vlan_destroy $ul1 111
+
+ __simple_if_fini dummy1 192.0.2.65/32
+ ip link del dev dummy1
+
+ simple_if_fini $ul1
+ simple_if_fini $ol1 192.0.2.2/28
+}
+
+sw2_hierarchical_create()
+{
+ local type=$1; shift
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ simple_if_init $ol2 192.0.2.17/28
+ simple_if_init $ul2
+
+ ip link add name dummy2 type dummy
+ __simple_if_init dummy2 v$ul2 192.0.2.66/32
+
+ vlan_create $ul2 111 v$ul2 192.0.2.130/28
+ tunnel_create g2a $type 192.0.2.66 192.0.2.65 tos inherit dev dummy2 \
+ "$@"
+ ip link set dev g2a master v$ol2
+
+ ip route add vrf v$ul2 192.0.2.65/32 via 192.0.2.129
+ ip route add vrf v$ol2 192.0.2.0/28 nexthop dev g2a
+}
+
+sw2_hierarchical_destroy()
+{
+ local ol2=$1; shift
+ local ul2=$1; shift
+
+ ip route del vrf v$ol2 192.0.2.0/28
+ ip route del vrf v$ul2 192.0.2.65/32
+
+ tunnel_destroy g2a
+ vlan_destroy $ul2 111
+
+ __simple_if_fini dummy2 192.0.2.66/32
+ ip link del dev dummy2
+
+ simple_if_fini $ul2
+ simple_if_fini $ol2 192.0.2.17/28
+}
+
+topo_mtu_change()
+{
+ local mtu=$1
+
+ ip link set mtu $mtu dev $h1
+ ip link set mtu $mtu dev $ol1
+ ip link set mtu $mtu dev g1a
+ ip link set mtu $mtu dev $ul1
+ ip link set mtu $mtu dev $ul1.111
+ ip link set mtu $mtu dev $h2
+ ip link set mtu $mtu dev $ol2
+ ip link set mtu $mtu dev g2a
+ ip link set mtu $mtu dev $ul2
+ ip link set mtu $mtu dev $ul2.111
+}
+
+test_mtu_change()
+{
+ local encap=$1; shift
+
+ RET=0
+
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_fail $? "ping $encap should not pass with size 1800"
+
+ RET=0
+
+ topo_mtu_change 2000
+ ping_do $h1 192.0.2.18 "-s 1800 -w 3"
+ check_err $?
+ log_test "ping $encap packet size 1800 after MTU change"
+}
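+
+# A sketch of how a test script is expected to drive these helpers (the call
+# sequence below is illustrative, not defined in this library):
+#
+#	sw1_flat_create gre $ol1 $ul1
+#	sw2_flat_create gre $ol2 $ul2
+#	test_mtu_change gre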
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 3f248d1f5b91..9385dc971269 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -17,6 +17,7 @@ NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
MCD=${MCD:=smcrouted}
MC_CLI=${MC_CLI:=smcroutectl}
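+# Ping deadline in seconds; like the knobs above it can be overridden from
+# the environment, e.g.:
+#   PING_TIMEOUT=10 ./mirror_gre_flower.sh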
+PING_TIMEOUT=${PING_TIMEOUT:=5}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -211,7 +212,7 @@ log_test()
return 1
fi
- printf "TEST: %-60s [PASS]\n" "$test_name $opt_str"
+ printf "TEST: %-60s [ OK ]\n" "$test_name $opt_str"
return 0
}
@@ -820,7 +821,8 @@ ping_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping_test()
@@ -840,7 +842,8 @@ ping6_do()
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING6 $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name \
+ $PING6 $args $dip -c 10 -i 0.1 -w $PING_TIMEOUT &> /dev/null
}
ping6_test()
diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh
new file mode 100755
index 000000000000..6e4626ae71b0
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/loopback.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="loopback_test"
+NUM_NETIFS=2
+source tc_common.sh
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24
+ tc qdisc add dev $h1 clsact
+}
+
+h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/24
+}
+
+h2_create()
+{
+ simple_if_init $h2
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2
+}
+
+loopback_test()
+{
+ RET=0
+
+ tc filter add dev $h1 ingress protocol arp pref 1 handle 101 flower \
+ skip_hw arp_op reply arp_tip 192.0.2.1 action drop
+
+ $MZ $h1 -c 1 -t arp -q
+
+ tc_check_packets "dev $h1 ingress" 101 1
+ check_fail $? "Matched on a filter without loopback setup"
+
+ ethtool -K $h1 loopback on
+ check_err $? "Failed to enable loopback"
+
+ setup_wait_dev $h1
+
+ $MZ $h1 -c 1 -t arp -q
+
+ tc_check_packets "dev $h1 ingress" 101 1
+ check_err $? "Did not match on filter with loopback"
+
+ ethtool -K $h1 loopback off
+ check_err $? "Failed to disable loopback"
+
+ $MZ $h1 -c 1 -t arp -q
+
+ tc_check_packets "dev $h1 ingress" 101 2
+ check_fail $? "Matched on a filter after loopback was removed"
+
+ tc filter del dev $h1 ingress protocol arp pref 1 handle 101 flower
+
+ log_test "loopback"
+}
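+
+# Note: the "loopback" feature is driver dependent; whether a NIC exposes it
+# can be checked with, e.g.:
+#   ethtool -k $h1 | grep loopback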
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index 61844caf671e..28d568c48a73 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -190,6 +190,8 @@ setup_prepare()
h4_create
switch_create
+ forwarding_enable
+
trap_install $h3 ingress
trap_install $h4 ingress
}
@@ -201,6 +203,8 @@ cleanup()
trap_uninstall $h4 ingress
trap_uninstall $h3 ingress
+ forwarding_restore
+
switch_destroy
h4_destroy
h3_destroy
@@ -220,11 +224,15 @@ test_lag_slave()
RET=0
+ tc filter add dev $swp1 ingress pref 999 \
+ proto 802.1q flower vlan_ethtype arp $tcflags \
+ action pass
mirror_install $swp1 ingress gt4 \
- "proto 802.1q flower vlan_id 333 $tcflags"
+ "proto 802.1q flower vlan_id 333 $tcflags"
# Test connectivity through $up_dev when $down_dev is set down.
ip link set dev $down_dev down
+ ip neigh flush dev br1
setup_wait_dev $up_dev
setup_wait_dev $host_dev
$ARPING -I br1 192.0.2.130 -qfc 1
@@ -240,6 +248,7 @@ test_lag_slave()
ip link set dev $up_dev up
ip link set dev $down_dev up
mirror_uninstall $swp1 ingress
+ tc filter del dev $swp1 ingress pref 999
log_test "$what ($tcflags)"
}
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
index 135902aa8b11..472bd023e2a5 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
@@ -79,6 +79,7 @@ test_span_gre_ttl()
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 0
ip link set dev $tundev type $type ttl 50
+ sleep 2
mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
ip link set dev $tundev type $type ttl 100
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
index 12914f40612d..09389f3b9369 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh
@@ -81,6 +81,8 @@ full_test_span_gre_dir_acl()
local match_dip=$1; shift
local what=$1; shift
+ RET=0
+
mirror_install $swp1 $direction $tundev \
"protocol ip flower $tcflags dst_ip $match_dip"
fail_test_span_gre_dir $tundev $direction
@@ -108,8 +110,6 @@ test_ip6gretap()
test_all()
{
- RET=0
-
slow_path_trap_install $swp1 ingress
slow_path_trap_install $swp1 egress
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
index 204b25f13934..c02291e9841e 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
@@ -1,11 +1,44 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# This test uses standard topology for testing gretap. See
-# mirror_gre_topo_lib.sh for more details.
-#
# Test for "tc action mirred egress mirror" when the underlay route points at a
# vlan device on top of a bridge device with vlan filtering (802.1q).
+#
+# +---------------------+ +---------------------+
+# | H1 | | H2 |
+# | + $h1 | | $h2 + |
+# | | 192.0.2.1/28 | | 192.0.2.2/28 | |
+# +-----|---------------+ +---------------|-----+
+# | |
+# +-----|-------------------------------------------------------------|-----+
+# | SW o--> mirred egress mirror dev {gt4,gt6} | |
+# | | | |
+# | +---|-------------------------------------------------------------|---+ |
+# | | + $swp1 br1 $swp2 + | |
+# | | | |
+# | | + $swp3 | |
+# | +---|-----------------------------------------------------------------+ |
+# | | | |
+# | | + br1.555 |
+# | | 192.0.2.130/28 |
+# | | 2001:db8:2::2/64 |
+# | | |
+# | | + gt6 (ip6gretap) + gt4 (gretap) |
+# | | : loc=2001:db8:2::1 : loc=192.0.2.129 |
+# | | : rem=2001:db8:2::2 : rem=192.0.2.130 |
+# | | : ttl=100 : ttl=100 |
+# | | : tos=inherit : tos=inherit |
+# | | : : |
+# +-----|---------------------:----------------------:----------------------+
+# | : :
+# +-----|---------------------:----------------------:----------------------+
+# | H3 + $h3 + h3-gt6 (ip6gretap) + h3-gt4 (gretap) |
+# | | loc=2001:db8:2::2 loc=192.0.2.130 |
+# | + $h3.555 rem=2001:db8:2::1 rem=192.0.2.129 |
+# | 192.0.2.130/28 ttl=100 ttl=100 |
+# | 2001:db8:2::2/64 tos=inherit tos=inherit |
+# | |
+# +-------------------------------------------------------------------------+
ALL_TESTS="
test_gretap
@@ -30,6 +63,15 @@ source mirror_gre_topo_lib.sh
require_command $ARPING
+h3_addr_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+
+ ip addr $add_del dev $dev 192.0.2.130/28
+ ip addr $add_del dev $dev 2001:db8:2::2/64
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -55,7 +97,8 @@ setup_prepare()
ip route rep 192.0.2.130/32 dev br1.555
ip -6 route rep 2001:db8:2::2/128 dev br1.555
- vlan_create $h3 555 v$h3 192.0.2.130/28 2001:db8:2::2/64
+ vlan_create $h3 555 v$h3
+ h3_addr_add_del add $h3.555
ip link set dev $swp3 master br1
bridge vlan add dev $swp3 vid 555
@@ -68,6 +111,8 @@ cleanup()
ip link set dev $swp2 nomaster
ip link set dev $swp3 nomaster
+
+ h3_addr_add_del del $h3.555
vlan_destroy $h3 555
vlan_destroy br1 555
@@ -182,13 +227,19 @@ test_span_gre_untagged_egress()
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3.555
bridge vlan add dev $swp3 vid 555 pvid untagged
- sleep 1
+ h3_addr_add_del add $h3
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
fail_test_span_vlan_dir $h3 555 ingress
+ h3_addr_add_del del $h3
bridge vlan add dev $swp3 vid 555
- sleep 1
+ h3_addr_add_del add $h3.555
+ sleep 5
+
quick_test_span_gre_dir $tundev ingress
quick_test_span_vlan_dir $h3 555 ingress
@@ -218,12 +269,25 @@ test_span_gre_fdb_roaming()
mirror_install $swp1 ingress $tundev "matchall $tcflags"
quick_test_span_gre_dir $tundev ingress
- bridge fdb del dev $swp3 $h3mac vlan 555 master
- bridge fdb add dev $swp2 $h3mac vlan 555 master
- sleep 1
- fail_test_span_gre_dir $tundev ingress
-
- bridge fdb del dev $swp2 $h3mac vlan 555 master
+ while ((RET == 0)); do
+ bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
+ bridge fdb add dev $swp2 $h3mac vlan 555 master
+ sleep 1
+ fail_test_span_gre_dir $tundev ingress
+
+ if ! bridge fdb sh dev $swp2 vlan 555 master \
+ | grep -q $h3mac; then
+ printf "TEST: %-60s [RETRY]\n" \
+ "$what: MAC roaming ($tcflags)"
+ # ARP or ND probably reprimed the FDB while the test
+ # was running. We would get a spurious failure.
+ RET=0
+ continue
+ fi
+ break
+ done
+
+ bridge fdb del dev $swp2 $h3mac vlan 555 master 2>/dev/null
# Re-prime FDB
$ARPING -I br1.555 192.0.2.130 -fqc 1
sleep 1
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index 07991e1025c7..00797597fcf5 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -29,9 +29,12 @@ mirror_test()
local pref=$1; shift
local expect=$1; shift
+ local ping_timeout=$((PING_TIMEOUT * 5))
local t0=$(tc_rule_stats_get $dev $pref)
ip vrf exec $vrf_name \
- ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ${PING} ${sip:+-I $sip} $dip -c 10 -i 0.5 -w $ping_timeout \
+ &> /dev/null
+ sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
# Tolerate a couple stray extra packets.
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
index 7bd2ebb6e9de..4eac0a06f451 100755
--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -145,16 +145,19 @@ bc_forwarding_disable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 0
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
+ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 0
}
bc_forwarding_enable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 1
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
+ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 1
}
bc_forwarding_restore()
{
+ sysctl_restore net.ipv4.conf.$rp2.bc_forwarding
sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
sysctl_restore net.ipv4.conf.all.bc_forwarding
}
@@ -170,7 +173,8 @@ ping_test_from()
log_info "ping $dip, expected reply from $from"
ip vrf exec $(master_name_get $oif) \
- $PING -I $oif $dip -c 10 -i 0.1 -w 2 -b 2>&1 | grep $from &> /dev/null
+ $PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
+ | grep "bytes from $from" > /dev/null
check_err_fail $fail $?
}
diff --git a/tools/testing/selftests/net/forwarding/router_multicast.sh b/tools/testing/selftests/net/forwarding/router_multicast.sh
index 109e6d785169..57e90c873a2c 100755
--- a/tools/testing/selftests/net/forwarding/router_multicast.sh
+++ b/tools/testing/selftests/net/forwarding/router_multicast.sh
@@ -28,7 +28,7 @@
# +------------------+ +------------------+
#
-ALL_TESTS="mcast_v4 mcast_v6"
+ALL_TESTS="mcast_v4 mcast_v6 rpf_v4 rpf_v6"
NUM_NETIFS=6
source lib.sh
source tc_common.sh
@@ -46,10 +46,14 @@ h1_create()
ip route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::1
ip route add 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::1
+
+ tc qdisc add dev $h1 ingress
}
h1_destroy()
{
+ tc qdisc del dev $h1 ingress
+
ip route del 2001:db8:3::/64 vrf v$h1
ip route del 2001:db8:2::/64 vrf v$h1
@@ -124,10 +128,14 @@ router_create()
ip address add 2001:db8:1::1/64 dev $rp1
ip address add 2001:db8:2::1/64 dev $rp2
ip address add 2001:db8:3::1/64 dev $rp3
+
+ tc qdisc add dev $rp3 ingress
}
router_destroy()
{
+ tc qdisc del dev $rp3 ingress
+
ip address del 2001:db8:3::1/64 dev $rp3
ip address del 2001:db8:2::1/64 dev $rp2
ip address del 2001:db8:1::1/64 dev $rp1
@@ -301,6 +309,103 @@ mcast_v6()
log_test "mcast IPv6"
}
+rpf_v4()
+{
+ # Add a multicast route from first router port to the other two. Send
+ # matching packets and test that both hosts receive them. Then, send
+ # the same packets via the third router port and test that they do not
+ # reach any host due to RPF check. A filter with 'skip_hw' is added to
+ # test that devices capable of multicast routing offload trap those
+	# packets. The filter is essentially a NOP in other scenarios.
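+	#
+	# create_mcast_sg()/delete_mcast_sg() are defined earlier in this
+	# script and are assumed to install/remove the (S,G) route via
+	# smcroute, roughly:
+	#
+	#   smcroutectl add $rp1 198.51.100.2 225.1.2.3 $rp2 $rp3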
+
+ RET=0
+
+ tc filter add dev $h1 ingress protocol ip pref 1 handle 1 flower \
+ dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 1 flower \
+ dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h3 ingress protocol ip pref 1 handle 1 flower \
+ dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $rp3 ingress protocol ip pref 1 handle 1 flower \
+ skip_hw dst_ip 225.1.2.3 ip_proto udp dst_port 12345 action pass
+
+ create_mcast_sg $rp1 198.51.100.2 225.1.2.3 $rp2 $rp3
+
+ $MZ $h1 -c 5 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 5
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 1 5
+ check_err $? "Multicast not received on second host"
+
+ $MZ $h3 -c 5 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h1 ingress" 1 0
+ check_err $? "Multicast received on first host when should not"
+ tc_check_packets "dev $h2 ingress" 1 5
+ check_err $? "Multicast received on second host when should not"
+ tc_check_packets "dev $rp3 ingress" 1 5
+ check_err $? "Packets not trapped due to RPF check"
+
+ delete_mcast_sg $rp1 198.51.100.2 225.1.2.3 $rp2 $rp3
+
+ tc filter del dev $rp3 ingress protocol ip pref 1 handle 1 flower
+ tc filter del dev $h3 ingress protocol ip pref 1 handle 1 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 1 flower
+ tc filter del dev $h1 ingress protocol ip pref 1 handle 1 flower
+
+ log_test "RPF IPv4"
+}
+
+rpf_v6()
+{
+ RET=0
+
+ tc filter add dev $h1 ingress protocol ipv6 pref 1 handle 1 flower \
+ dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 1 flower \
+ dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $h3 ingress protocol ipv6 pref 1 handle 1 flower \
+ dst_ip ff0e::3 ip_proto udp dst_port 12345 action drop
+ tc filter add dev $rp3 ingress protocol ipv6 pref 1 handle 1 flower \
+ skip_hw dst_ip ff0e::3 ip_proto udp dst_port 12345 action pass
+
+ create_mcast_sg $rp1 2001:db8:1::2 ff0e::3 $rp2 $rp3
+
+ $MZ $h1 -6 -c 5 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+ -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h2 ingress" 1 5
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 1 5
+ check_err $? "Multicast not received on second host"
+
+ $MZ $h3 -6 -c 5 -p 128 -t udp "ttl=10,sp=54321,dp=12345" \
+ -a 00:11:22:33:44:55 -b 33:33:00:00:00:03 \
+ -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h1 ingress" 1 0
+ check_err $? "Multicast received on first host when should not"
+ tc_check_packets "dev $h2 ingress" 1 5
+ check_err $? "Multicast received on second host when should not"
+ tc_check_packets "dev $rp3 ingress" 1 5
+ check_err $? "Packets not trapped due to RPF check"
+
+ delete_mcast_sg $rp1 2001:db8:1::2 ff0e::3 $rp2 $rp3
+
+ tc filter del dev $rp3 ingress protocol ipv6 pref 1 handle 1 flower
+ tc filter del dev $h3 ingress protocol ipv6 pref 1 handle 1 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 1 flower
+ tc filter del dev $h1 ingress protocol ipv6 pref 1 handle 1 flower
+
+ log_test "RPF IPv6"
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index 20d1077e5a3d..124803eea4a9 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -2,7 +2,8 @@
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
- match_src_ip_test match_ip_flags_test"
+ match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
+ match_ip_tos_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -219,6 +220,96 @@ match_ip_flags_test()
log_test "ip_flags match ($tcflags)"
}
+match_pcp_test()
+{
+ RET=0
+
+ vlan_create $h2 85 v$h2 192.0.2.11/24
+
+ tc filter add dev $h2 ingress protocol 802.1q pref 1 handle 101 \
+ flower vlan_prio 6 $tcflags dst_mac $h2mac action drop
+ tc filter add dev $h2 ingress protocol 802.1q pref 2 handle 102 \
+ flower vlan_prio 7 $tcflags dst_mac $h2mac action drop
+
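+	# mausezahn's -Q option takes a [<pcp>:]<vid> tag spec (assumption
+	# based on mz(1)): "7:85" tags with PCP 7 on VLAN 85, "0:85" with
+	# PCP 0 on the same VLAN.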
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -B 192.0.2.11 -Q 7:85 -t ip -q
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -B 192.0.2.11 -Q 0:85 -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 0
+ check_err $? "Matched on specified PCP when should not"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on specified PCP"
+
+ tc filter del dev $h2 ingress protocol 802.1q pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol 802.1q pref 1 handle 101 flower
+
+ vlan_destroy $h2 85
+
+ log_test "PCP match ($tcflags)"
+}
+
+match_vlan_test()
+{
+ RET=0
+
+ vlan_create $h2 85 v$h2 192.0.2.11/24
+ vlan_create $h2 75 v$h2 192.0.2.10/24
+
+ tc filter add dev $h2 ingress protocol 802.1q pref 1 handle 101 \
+ flower vlan_id 75 $tcflags action drop
+ tc filter add dev $h2 ingress protocol 802.1q pref 2 handle 102 \
+ flower vlan_id 85 $tcflags action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -B 192.0.2.11 -Q 0:85 -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 0
+ check_err $? "Matched on specified VLAN when should not"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on specified VLAN"
+
+ tc filter del dev $h2 ingress protocol 802.1q pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol 802.1q pref 1 handle 101 flower
+
+ vlan_destroy $h2 75
+ vlan_destroy $h2 85
+
+ log_test "VLAN match ($tcflags)"
+}
+
+match_ip_tos_test()
+{
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 ip_tos 0x20 action drop
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.0.2.2 ip_tos 0x18 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip tos=18 -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched on a wrong filter (0x18)"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter (0x18)"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip tos=20 -q
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_fail $? "Matched on a wrong filter (0x20)"
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Did not match on correct filter (0x20)"
+
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ log_test "ip_tos match ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/forwarding/tc_vlan_modify.sh b/tools/testing/selftests/net/forwarding/tc_vlan_modify.sh
new file mode 100755
index 000000000000..45378905cb97
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tc_vlan_modify.sh
@@ -0,0 +1,164 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ vlan_modify_ingress
+ vlan_modify_egress
+"
+
+NUM_NETIFS=4
+CHECK_TC="yes"
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+ vlan_create $h1 85 v$h1 192.0.2.17/28 2001:db8:2::1/64
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 85
+ simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64
+ vlan_create $h2 65 v$h2 192.0.2.18/28 2001:db8:2::2/64
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 65
+ simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ ip link add dev br0 type bridge vlan_filtering 1 mcast_snooping 0
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+
+ bridge vlan add dev $swp1 vid 85
+ bridge vlan add dev $swp2 vid 65
+
+ bridge vlan add dev $swp2 vid 85
+ bridge vlan add dev $swp1 vid 65
+
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+}
+
+switch_destroy()
+{
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ bridge vlan del vid 65 dev $swp1
+ bridge vlan del vid 85 dev $swp2
+
+ bridge vlan del vid 65 dev $swp2
+ bridge vlan del vid 85 dev $swp1
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+vlan_modify_ingress()
+{
+ RET=0
+
+ ping_do $h1.85 192.0.2.18
+ check_fail $? "ping between two different vlans passed when should not"
+
+ ping6_do $h1.85 2001:db8:2::2
+ check_fail $? "ping6 between two different vlans passed when should not"
+
+ tc filter add dev $swp1 ingress protocol all pref 1 handle 1 \
+ flower action vlan modify id 65
+ tc filter add dev $swp2 ingress protocol all pref 1 handle 1 \
+ flower action vlan modify id 85
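+	# Frames arriving on $swp1 tagged with VID 85 are rewritten to VID 65
+	# (and 65 -> 85 on $swp2), so the two hosts end up in one broadcast
+	# domain despite using different VLANs.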
+
+ ping_do $h1.85 192.0.2.18
+ check_err $? "ping between two different vlans failed when should not"
+
+ ping6_do $h1.85 2001:db8:2::2
+ check_err $? "ping6 between two different vlans failed when should not"
+
+ log_test "VLAN modify at ingress"
+
+ tc filter del dev $swp2 ingress protocol all pref 1 handle 1 flower
+ tc filter del dev $swp1 ingress protocol all pref 1 handle 1 flower
+}
+
+vlan_modify_egress()
+{
+ RET=0
+
+ ping_do $h1.85 192.0.2.18
+ check_fail $? "ping between two different vlans passed when should not"
+
+ ping6_do $h1.85 2001:db8:2::2
+ check_fail $? "ping6 between two different vlans passed when should not"
+
+ tc filter add dev $swp1 egress protocol all pref 1 handle 1 \
+ flower action vlan modify id 85
+ tc filter add dev $swp2 egress protocol all pref 1 handle 1 \
+ flower action vlan modify id 65
+
+ ping_do $h1.85 192.0.2.18
+ check_err $? "ping between two different vlans failed when should not"
+
+ ping6_do $h1.85 2001:db8:2::2
+ check_err $? "ping6 between two different vlans failed when should not"
+
+ log_test "VLAN modify at egress"
+
+ tc filter del dev $swp2 egress protocol all pref 1 handle 1 flower
+ tc filter del dev $swp1 egress protocol all pref 1 handle 1 flower
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
new file mode 100755
index 000000000000..a0b5f57d6bd3
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
@@ -0,0 +1,567 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.11/24 10.1.2.11/24 | | |
+# | | | | | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | | | |
+# | | + vlan10 vlan20 + | |
+# | | | 10.1.1.12/24 10.1.2.12/24 | | |
+# | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1000 id 2000 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ arp_decap
+ arp_suppression
+"
+NUM_NETIFS=6
+source lib.sh
+
+require_command $ARPING
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+	# that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
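+	# 00:00:5e:00:01:01 serves as the shared (anycast) gateway MAC on both
+	# VTEPs; the local FDB entries below make the bridge hand frames for it
+	# up to the SVIs instead of forwarding them.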
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+macs_populate()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f macs_populate
+
+macs_initialize()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ macs_populate $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 macs_populate $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ macs_initialize
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+arp_decap()
+{
+ # Repeat the ping tests, but without populating the neighbours. This
+ # makes sure we correctly decapsulate ARP packets
+ log_info "deleting neighbours from vlan interfaces"
+
+ ip neigh del 10.1.1.102 dev vlan10
+ ip neigh del 10.1.2.102 dev vlan20
+
+ ping_ipv4
+
+ ip neigh replace 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+ ip neigh replace 10.1.2.102 lladdr $(in_ns ns1 mac_get w4) nud noarp \
+ dev vlan20 extern_learn
+}
+
+arp_suppression_compare()
+{
+ local expect=$1; shift
+ local actual=$(in_ns ns1 tc_rule_stats_get vx10 1 ingress)
+
+ (( expect == actual ))
+	check_err $? "expected $expect ARPs, got $actual"
+}
+
+arp_suppression()
+{
+ ip link set dev vx10 type bridge_slave neigh_suppress on
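+	# Whether the flag took effect can be inspected with, e.g.:
+	#   bridge -d link show dev vx10 | grep neigh_suppress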
+
+ in_ns ns1 tc qdisc add dev vx10 clsact
+ in_ns ns1 tc filter add dev vx10 ingress proto arp pref 1 handle 101 \
+ flower dst_mac ff:ff:ff:ff:ff:ff arp_tip 10.1.1.102 arp_op \
+ request action pass
+
+ # The neighbour is configured on the SVI and ARP suppression is on, so
+ # the ARP request should be suppressed
+ RET=0
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 0
+
+ log_test "neigh_suppress: on / neigh exists: yes"
+
+	# Delete the neighbour from the SVI. A single ARP request should be
+ # received by the remote VTEP
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 1
+
+ log_test "neigh_suppress: on / neigh exists: no"
+
+ # Turn off ARP suppression and make sure ARP is not suppressed,
+ # regardless of neighbour existence on the SVI
+ RET=0
+
+ ip neigh del 10.1.1.102 dev vlan10 &> /dev/null
+ ip link set dev vx10 type bridge_slave neigh_suppress off
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 2
+
+ log_test "neigh_suppress: off / neigh exists: no"
+
+ RET=0
+
+ ip neigh add 10.1.1.102 lladdr $(in_ns ns1 mac_get w2) nud noarp \
+ dev vlan10 extern_learn
+
+ $ARPING -I $h1 -fqb -c 1 -w 1 10.1.1.102
+ check_err $? "arping failed"
+
+ arp_suppression_compare 3
+
+ log_test "neigh_suppress: off / neigh exists: yes"
+
+ in_ns ns1 tc qdisc del dev vx10 clsact
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
new file mode 100755
index 000000000000..1209031bc794
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_symmetric.sh
@@ -0,0 +1,551 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +---------------------------+ +------------------------------+
+# | vrf-h1 | | vrf-h2 |
+# | + $h1 | | + $h2 |
+# | | 10.1.1.101/24 | | | 10.1.2.101/24 |
+# | | default via 10.1.1.1 | | | default via 10.1.2.1 |
+# +----|----------------------+ +----|-------------------------+
+# | |
+# +----|--------------------------------------------|-------------------------+
+# | SW | | |
+# | +--|--------------------------------------------|-----------------------+ |
+# | | + $swp1 br1 + $swp2 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.1 local 10.0.0.1 | |
+# | | remote 10.0.0.2 remote 10.0.0.2 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.1 | |
+# | | remote 10.0.0.2 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | | | |
+# | | + vlan10 | vlan20 + | |
+# | | | 10.1.1.11/24 | 10.1.2.11/24 | | |
+# | | | | | | |
+# | | + vlan10-v (macvlan) + vlan20-v (macvlan) + | |
+# | | 10.1.1.1/24 vlan4001 10.1.2.1/24 | |
+# | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | |
+# | | vrf-green | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | + $rp1 +lo |
+# | | 192.0.2.1/24 10.0.0.1/32 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | vrf-spine |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | |
+# | + v1 (veth) |
+# | | 192.0.3.2/24 |
+# +----|--------------------------------------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | + v2 (veth) +lo NS1 (netns) |
+# | 192.0.3.1/24 10.0.0.2/32 |
+# | |
+# | +-----------------------------------------------------------------------+ |
+# | | vrf-green | |
+# | | + vlan10-v (macvlan) vlan20-v (macvlan) + | |
+# | | | 10.1.1.1/24 10.1.2.1/24 | | |
+# | | | 00:00:5e:00:01:01 00:00:5e:00:01:01 | | |
+# | | | vlan4001 | | |
+# | | + vlan10 + vlan20 + | |
+# | | | 10.1.1.12/24 | 10.1.2.12/24 | | |
+# | | | | | | |
+# | | +--------------------------------+--------------------------------+ | |
+# | | | | |
+# | +-----------------------------------|-----------------------------------+ |
+# | | |
+# | +-----------------------------------+-----------------------------------+ |
+# | | | |
+# | | + vx10 + vx20 | |
+# | | local 10.0.0.2 local 10.0.0.2 | |
+# | | remote 10.0.0.1 remote 10.0.0.1 | |
+# | | id 1010 id 1020 | |
+# | | dstport 4789 dstport 4789 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | | | |
+# | | + vx4001 | |
+# | | local 10.0.0.2 | |
+# | | remote 10.0.0.1 | |
+# | | id 104001 | |
+# | | dstport 4789 | |
+# | | vid 4001 pvid untagged | |
+# | | | |
+# | | + w1 (veth) + w3 (veth) | |
+# | | | vid 10 pvid untagged br1 | vid 20 pvid untagged | |
+# | +--|------------------------------------------|-------------------------+ |
+# | | | |
+# | | | |
+# | +--|----------------------+ +--|-------------------------+ |
+# | | | vrf-h1 | | | vrf-h2 | |
+# | | + w2 (veth) | | + w4 (veth) | |
+# | | 10.1.1.102/24 | | 10.1.2.102/24 | |
+# | | default via 10.1.1.1 | | default via 10.1.2.1 | |
+# | +-------------------------+ +----------------------------+ |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+"
+NUM_NETIFS=6
+source lib.sh
+
+hx_create()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ vrf_create $vrf_name
+ ip link set dev $if_name master $vrf_name
+ ip link set dev $vrf_name up
+ ip link set dev $if_name up
+
+ ip address add $ip_addr/24 dev $if_name
+ ip neigh replace $gw_ip lladdr 00:00:5e:00:01:01 nud permanent \
+ dev $if_name
+ ip route add default vrf $vrf_name nexthop via $gw_ip
+}
+export -f hx_create
+
+hx_destroy()
+{
+ local vrf_name=$1; shift
+ local if_name=$1; shift
+ local ip_addr=$1; shift
+ local gw_ip=$1; shift
+
+ ip route del default vrf $vrf_name nexthop via $gw_ip
+ ip neigh del $gw_ip dev $if_name
+ ip address del $ip_addr/24 dev $if_name
+
+ ip link set dev $if_name down
+ vrf_destroy $vrf_name
+}
+
+h1_create()
+{
+ hx_create "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h1_destroy()
+{
+ hx_destroy "vrf-h1" $h1 10.1.1.101 10.1.1.1
+}
+
+h2_create()
+{
+ hx_create "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+h2_destroy()
+{
+ hx_destroy "vrf-h2" $h2 10.1.2.101 10.1.2.1
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+	# that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ ip address add dev $rp1 192.0.2.1/24
+ ip route add 10.0.0.2/32 nexthop via 192.0.2.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.1 remote 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 20 dev $swp2 pvid untagged
+
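+	# vx4001/VID 4001 make up the L3 VNI: routed (symmetric) traffic
+	# between the VTEPs travels over it inside vrf-green, and no host port
+	# is ever a member of this VLAN.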
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip address add 10.0.0.1/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.11/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.11/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+
+switch_destroy()
+{
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 20
+ bridge fdb del 00:00:5e:00:01:01 dev br1 self local vlan 10
+
+ bridge vlan del vid 4001 dev br1 self
+ bridge vlan del vid 20 dev br1 self
+ bridge vlan del vid 10 dev br1 self
+
+ ip link del dev vlan4001
+
+ ip link del dev vlan20
+
+ ip link del dev vlan10
+
+ vrf_destroy "vrf-green"
+
+ ip address del 10.0.0.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 4001 dev vx4001
+ ip link set dev vx4001 nomaster
+
+ ip link set dev vx4001 down
+ ip link del dev vx4001
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ ip route del 10.0.0.2/32 nexthop via 192.0.2.2
+ ip address del dev $rp1 192.0.2.1/24
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+spine_create()
+{
+ vrf_create "vrf-spine"
+ ip link set dev $rp2 master vrf-spine
+ ip link set dev v1 master vrf-spine
+ ip link set dev vrf-spine up
+ ip link set dev $rp2 up
+ ip link set dev v1 up
+
+ ip address add 192.0.2.2/24 dev $rp2
+ ip address add 192.0.3.2/24 dev v1
+
+ ip route add 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+ ip route add 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+}
+
+spine_destroy()
+{
+ ip route del 10.0.0.2/32 vrf vrf-spine nexthop via 192.0.3.1
+ ip route del 10.0.0.1/32 vrf vrf-spine nexthop via 192.0.2.1
+
+ ip address del 192.0.3.2/24 dev v1
+ ip address del 192.0.2.2/24 dev $rp2
+
+ ip link set dev v1 down
+ ip link set dev $rp2 down
+ vrf_destroy "vrf-spine"
+}
+
+ns_h1_create()
+{
+ hx_create "vrf-h1" w2 10.1.1.102 10.1.1.1
+}
+export -f ns_h1_create
+
+ns_h2_create()
+{
+ hx_create "vrf-h2" w4 10.1.2.102 10.1.2.1
+}
+export -f ns_h2_create
+
+ns_switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ ip link set dev br1 up
+
+ ip link set dev v2 up
+ ip address add dev v2 192.0.3.1/24
+ ip route add 10.0.0.1/32 nexthop via 192.0.3.2
+
+ ip link add name vx10 type vxlan id 1010 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 1020 \
+ local 10.0.0.2 remote 10.0.0.1 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link add name vx4001 type vxlan id 104001 \
+ local 10.0.0.2 dstport 4789 \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx4001 up
+
+ ip link set dev vx4001 master br1
+ bridge vlan add vid 4001 dev vx4001 pvid untagged
+
+ ip link set dev w1 master br1
+ ip link set dev w1 up
+ bridge vlan add vid 10 dev w1 pvid untagged
+
+ ip link set dev w3 master br1
+ ip link set dev w3 up
+ bridge vlan add vid 20 dev w3 pvid untagged
+
+ ip address add 10.0.0.2/32 dev lo
+
+ # Create SVIs
+ vrf_create "vrf-green"
+ ip link set dev vrf-green up
+
+ ip link add link br1 name vlan10 up master vrf-green type vlan id 10
+ ip address add 10.1.1.12/24 dev vlan10
+ ip link add link vlan10 name vlan10-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.1.1/24 dev vlan10-v
+
+ ip link add link br1 name vlan20 up master vrf-green type vlan id 20
+ ip address add 10.1.2.12/24 dev vlan20
+ ip link add link vlan20 name vlan20-v up master vrf-green \
+ address 00:00:5e:00:01:01 type macvlan mode private
+ ip address add 10.1.2.1/24 dev vlan20-v
+
+ ip link add link br1 name vlan4001 up master vrf-green \
+ type vlan id 4001
+
+ bridge vlan add vid 10 dev br1 self
+ bridge vlan add vid 20 dev br1 self
+ bridge vlan add vid 4001 dev br1 self
+
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 10
+ bridge fdb add 00:00:5e:00:01:01 dev br1 self local vlan 20
+}
+export -f ns_switch_create
+
+ns_init()
+{
+ ip link add name w1 type veth peer name w2
+ ip link add name w3 type veth peer name w4
+
+ ip link set dev lo up
+
+ ns_h1_create
+ ns_h2_create
+ ns_switch_create
+}
+export -f ns_init
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 ns_init
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+__l2_vni_init()
+{
+ local mac1=$1; shift
+ local mac2=$1; shift
+ local ip1=$1; shift
+ local ip2=$1; shift
+ local dst=$1; shift
+
+ bridge fdb add $mac1 dev vx10 self master extern_learn static \
+ dst $dst vlan 10
+ bridge fdb add $mac2 dev vx20 self master extern_learn static \
+ dst $dst vlan 20
+
+ ip neigh add $ip1 lladdr $mac1 nud noarp dev vlan10 \
+ extern_learn
+ ip neigh add $ip2 lladdr $mac2 nud noarp dev vlan20 \
+ extern_learn
+}
+export -f __l2_vni_init
+
+l2_vni_init()
+{
+ local h1_ns_mac=$(in_ns ns1 mac_get w2)
+ local h2_ns_mac=$(in_ns ns1 mac_get w4)
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ __l2_vni_init $h1_ns_mac $h2_ns_mac 10.1.1.102 10.1.2.102 10.0.0.2
+ in_ns ns1 __l2_vni_init $h1_mac $h2_mac 10.1.1.101 10.1.2.101 10.0.0.1
+}
+
+__l3_vni_init()
+{
+ local mac=$1; shift
+ local vtep_ip=$1; shift
+ local host1_ip=$1; shift
+ local host2_ip=$1; shift
+
+ bridge fdb add $mac dev vx4001 self master extern_learn static \
+ dst $vtep_ip vlan 4001
+
+ ip neigh add $vtep_ip lladdr $mac nud noarp dev vlan4001 extern_learn
+
+ ip route add $host1_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+ ip route add $host2_ip/32 vrf vrf-green nexthop via $vtep_ip \
+ dev vlan4001 onlink
+}
+export -f __l3_vni_init
+
+l3_vni_init()
+{
+ local vlan4001_ns_mac=$(in_ns ns1 mac_get vlan4001)
+ local vlan4001_mac=$(mac_get vlan4001)
+
+ __l3_vni_init $vlan4001_ns_mac 10.0.0.2 10.1.1.102 10.1.2.102
+ in_ns ns1 __l3_vni_init $vlan4001_mac 10.0.0.1 10.1.1.101 10.1.2.101
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ spine_create
+ ns1_create
+
+ l2_vni_init
+ l3_vni_init
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns1_destroy
+ spine_destroy
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 10.1.2.101 ": local->local vid 10->vid 20"
+ ping_test $h1 10.1.1.102 ": local->remote vid 10->vid 10"
+ ping_test $h2 10.1.2.102 ": local->remote vid 20->vid 20"
+ ping_test $h1 10.1.2.102 ": local->remote vid 10->vid 20"
+ ping_test $h2 10.1.1.102 ": local->remote vid 20->vid 10"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 5d56cc0838f6..c0c9ecb891e1 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -20,6 +20,7 @@ static bool cfg_do_ipv4;
static bool cfg_do_ipv6;
static bool cfg_verbose;
static bool cfg_overlap;
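+/* Set by -p: when testing overlaps (-o), tolerate kernels that still deliver
+ * the datagram instead of insisting on a recv timeout. */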
+static bool cfg_permissive;
static unsigned short cfg_port = 9000;
const struct in_addr addr4 = { .s_addr = __constant_htonl(INADDR_LOOPBACK + 2) };
@@ -35,7 +36,7 @@ const struct in6_addr addr6 = IN6ADDR_LOOPBACK_INIT;
static int payload_len;
static int max_frag_len;
-#define MSG_LEN_MAX 60000 /* Max UDP payload length. */
+#define MSG_LEN_MAX 10000 /* Max UDP payload length. */
#define IP4_MF (1u << 13) /* IPv4 MF flag. */
#define IP6_MF (1) /* IPv6 MF flag. */
@@ -59,13 +60,14 @@ static void recv_validate_udp(int fd_udp)
msg_counter++;
if (cfg_overlap) {
- if (ret != -1)
- error(1, 0, "recv: expected timeout; got %d",
- (int)ret);
- if (errno != ETIMEDOUT && errno != EAGAIN)
- error(1, errno, "recv: expected timeout: %d",
- errno);
- return; /* OK */
+ if (ret == -1 && (errno == ETIMEDOUT || errno == EAGAIN))
+ return; /* OK */
+ if (!cfg_permissive) {
+ if (ret != -1)
+ error(1, 0, "recv: expected timeout; got %d",
+ (int)ret);
+ error(1, errno, "recv: expected timeout: %d", errno);
+ }
}
if (ret == -1)
@@ -203,7 +205,6 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
{
struct ip *iphdr = (struct ip *)ip_frame;
struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
- const bool ipv4 = !ipv6;
int res;
int offset;
int frag_len;
@@ -251,7 +252,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
}
/* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
- if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
+ if (!cfg_overlap && (rand() % 100 < 20) &&
(payload_len > 9 * max_frag_len)) {
offset = 6 * max_frag_len;
while (offset < (UDP_HLEN + payload_len)) {
@@ -276,41 +277,38 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
if (cfg_overlap) {
- /* Send an extra random fragment. */
+ /* Send an extra random fragment.
+ *
+ * Duplicates and some fragments completely inside
+ * previously sent fragments are dropped/ignored. So
+ * random offset and frag_len can result in a dropped
+ * fragment instead of a dropped queue/packet. Thus we
+ * hard-code offset and frag_len.
+ */
+ if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
+ /* not enough payload for random offset and frag_len. */
+ offset = 8;
+ frag_len = UDP_HLEN + max_frag_len;
+ } else {
+ offset = rand() % (payload_len / 2);
+ frag_len = 2 * max_frag_len + 1 + rand() % 256;
+ }
if (ipv6) {
struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
/* sendto() returns EINVAL if offset + frag_len is too small. */
- offset = rand() % (UDP_HLEN + payload_len - 1);
- frag_len = max_frag_len + rand() % 256;
/* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
frag_len &= ~0x7;
fraghdr->ip6f_offlg = htons(offset / 8 | IP6_MF);
ip6hdr->ip6_plen = htons(frag_len);
frag_len += IP6_HLEN;
} else {
- /* In IPv4, duplicates and some fragments completely inside
- * previously sent fragments are dropped/ignored. So
- * random offset and frag_len can result in a dropped
- * fragment instead of a dropped queue/packet. So we
- * hard-code offset and frag_len.
- *
- * See ade446403bfb ("net: ipv4: do not handle duplicate
- * fragments as overlapping").
- */
- if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
- /* not enough payload to play with random offset and frag_len. */
- offset = 8;
- frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
- } else {
- offset = rand() % (payload_len / 2);
- frag_len = 2 * max_frag_len + 1 + rand() % 256;
- }
+ frag_len += IP4_HLEN;
iphdr->ip_off = htons(offset / 8 | IP4_MF);
iphdr->ip_len = htons(frag_len);
}
@@ -327,7 +325,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
while (offset < (UDP_HLEN + payload_len)) {
send_fragment(fd_raw, addr, alen, offset, ipv6);
/* IPv4 ignores duplicates, so randomly send a duplicate. */
- if (ipv4 && (1 == rand() % 100))
+ if (rand() % 100 == 1)
send_fragment(fd_raw, addr, alen, offset, ipv6);
offset += 2 * max_frag_len;
}
@@ -342,7 +340,7 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
*/
struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
int idx;
- int min_frag_len = ipv6 ? 1280 : 8;
+ int min_frag_len = 8;
/* Initialize the payload. */
for (idx = 0; idx < MSG_LEN_MAX; ++idx)
@@ -434,7 +432,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "46ov")) != -1) {
+ while ((c = getopt(argc, argv, "46opv")) != -1) {
switch (c) {
case '4':
cfg_do_ipv4 = true;
@@ -445,6 +443,9 @@ static void parse_opts(int argc, char **argv)
case 'o':
cfg_overlap = true;
break;
+ case 'p':
+ cfg_permissive = true;
+ break;
case 'v':
cfg_verbose = true;
break;
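
The new -p flag only relaxes the receive check in overlap mode: a datagram that was reassembled correctly is accepted even though an overlapping fragment was sent. A minimal sketch of invoking the binary by hand in a throwaway namespace (namespace name is arbitrary, run from tools/testing/selftests/net after building; the frag sysctl tuning done by ip_defrag.sh is omitted here):

    ip netns add defrag-test
    ip netns exec defrag-test ip link set lo up
    # -6: IPv6, -o: send overlapping fragments, -p: permissive overlap check
    ip netns exec defrag-test ./ip_defrag -6op
    ip netns del defrag-test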
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index 7dd79a9efb17..15d3489ecd9c 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -20,6 +20,10 @@ setup() {
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_high_thresh=9000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_low_thresh=7000000 >/dev/null 2>&1
+ ip netns exec "${NETNS}" sysctl -w net.netfilter.nf_conntrack_frag6_timeout=1 >/dev/null 2>&1
+
# DST cache can get full with a lot of frags, with GC not keeping up with the test.
ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
}
@@ -43,4 +47,16 @@ ip netns exec "${NETNS}" ./ip_defrag -6
echo "ipv6 defrag with overlaps"
ip netns exec "${NETNS}" ./ip_defrag -6o
+# insert an nf_conntrack rule so that the codepath in nf_conntrack_reasm.c is taken
+ip netns exec "${NETNS}" ip6tables -A INPUT -m conntrack --ctstate INVALID -j ACCEPT
+
+echo "ipv6 nf_conntrack defrag"
+ip netns exec "${NETNS}" ./ip_defrag -6
+
+echo "ipv6 nf_conntrack defrag with overlaps"
+# netfilter will drop some invalid packets, so we run the test in
+# permissive mode: i.e. pass the test if the packet is correctly assembled
+# even though we sent an overlapping fragment
+ip netns exec "${NETNS}" ./ip_defrag -6op
+
echo "all tests done"
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 912b2dc50be3..317dafcd605d 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -116,6 +116,10 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+PAUSE_ON_FAIL=no
+VERBOSE=0
+TRACING=0
+
# Some systems don't have a ping6 binary anymore
which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
@@ -204,8 +208,8 @@ tunnel6_a_addr="fd00:2::a"
tunnel6_b_addr="fd00:2::b"
tunnel6_mask="64"
-dummy6_0_addr="fc00:1000::0"
-dummy6_1_addr="fc00:1001::0"
+dummy6_0_prefix="fc00:1000::"
+dummy6_1_prefix="fc00:1001::"
dummy6_mask="64"
cleanup_done=1
@@ -222,6 +226,23 @@ err_flush() {
err_buf=
}
+run_cmd() {
+ cmd="$*"
+
+ if [ "$VERBOSE" = "1" ]; then
+ printf " COMMAND: $cmd\n"
+ fi
+
+ out="$($cmd 2>&1)"
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo " $out"
+ echo
+ fi
+
+ return $rc
+}
+
# Find the auto-generated name for this namespace
nsname() {
eval echo \$NS_$1
@@ -258,22 +279,22 @@ setup_fou_or_gue() {
fi
fi
- ${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
- ${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
+ run_cmd ${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
+ run_cmd ${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
- ${ns_b} ip fou add port 5556 ipproto ${ipproto}
- ${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
+ run_cmd ${ns_b} ip fou add port 5556 ipproto ${ipproto}
+ run_cmd ${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
if [ "${inner}" = "4" ]; then
- ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
- ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
+ run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
+ run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
else
- ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
- ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
+ run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
+ run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
fi
- ${ns_a} ip link set ${encap}_a up
- ${ns_b} ip link set ${encap}_b up
+ run_cmd ${ns_a} ip link set ${encap}_a up
+ run_cmd ${ns_b} ip link set ${encap}_b up
}
setup_fou44() {
@@ -319,17 +340,17 @@ setup_namespaces() {
}
setup_veth() {
- ${ns_a} ip link add veth_a type veth peer name veth_b || return 1
- ${ns_a} ip link set veth_b netns ${NS_B}
+ run_cmd ${ns_a} ip link add veth_a type veth peer name veth_b || return 1
+ run_cmd ${ns_a} ip link set veth_b netns ${NS_B}
- ${ns_a} ip addr add ${veth4_a_addr}/${veth4_mask} dev veth_a
- ${ns_b} ip addr add ${veth4_b_addr}/${veth4_mask} dev veth_b
+ run_cmd ${ns_a} ip addr add ${veth4_a_addr}/${veth4_mask} dev veth_a
+ run_cmd ${ns_b} ip addr add ${veth4_b_addr}/${veth4_mask} dev veth_b
- ${ns_a} ip addr add ${veth6_a_addr}/${veth6_mask} dev veth_a
- ${ns_b} ip addr add ${veth6_b_addr}/${veth6_mask} dev veth_b
+ run_cmd ${ns_a} ip addr add ${veth6_a_addr}/${veth6_mask} dev veth_a
+ run_cmd ${ns_b} ip addr add ${veth6_b_addr}/${veth6_mask} dev veth_b
- ${ns_a} ip link set veth_a up
- ${ns_b} ip link set veth_b up
+ run_cmd ${ns_a} ip link set veth_a up
+ run_cmd ${ns_b} ip link set veth_b up
}
setup_vti() {
@@ -342,14 +363,14 @@ setup_vti() {
[ ${proto} -eq 6 ] && vti_type="vti6" || vti_type="vti"
- ${ns_a} ip link add vti${proto}_a type ${vti_type} local ${veth_a_addr} remote ${veth_b_addr} key 10 || return 1
- ${ns_b} ip link add vti${proto}_b type ${vti_type} local ${veth_b_addr} remote ${veth_a_addr} key 10
+ run_cmd ${ns_a} ip link add vti${proto}_a type ${vti_type} local ${veth_a_addr} remote ${veth_b_addr} key 10 || return 1
+ run_cmd ${ns_b} ip link add vti${proto}_b type ${vti_type} local ${veth_b_addr} remote ${veth_a_addr} key 10
- ${ns_a} ip addr add ${vti_a_addr}/${vti_mask} dev vti${proto}_a
- ${ns_b} ip addr add ${vti_b_addr}/${vti_mask} dev vti${proto}_b
+ run_cmd ${ns_a} ip addr add ${vti_a_addr}/${vti_mask} dev vti${proto}_a
+ run_cmd ${ns_b} ip addr add ${vti_b_addr}/${vti_mask} dev vti${proto}_b
- ${ns_a} ip link set vti${proto}_a up
- ${ns_b} ip link set vti${proto}_b up
+ run_cmd ${ns_a} ip link set vti${proto}_a up
+ run_cmd ${ns_b} ip link set vti${proto}_b up
}
setup_vti4() {
@@ -375,17 +396,17 @@ setup_vxlan_or_geneve() {
opts_b=""
fi
- ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
- ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
+ run_cmd ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
+ run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
- ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
- ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
+ run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
+ run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
- ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
- ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+ run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
+ run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
- ${ns_a} ip link set ${type}_a up
- ${ns_b} ip link set ${type}_b up
+ run_cmd ${ns_a} ip link set ${type}_a up
+ run_cmd ${ns_b} ip link set ${type}_b up
}
setup_geneve4() {
@@ -409,15 +430,15 @@ setup_xfrm() {
veth_a_addr="${2}"
veth_b_addr="${3}"
- ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
- ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
- ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
- ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
+ run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1
+ run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+ run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
+ run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
- ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
- ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead "rfc4106(gcm(aes))" 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
- ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
- ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
+ run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+ run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel
+ run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel
+ run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel
}
setup_xfrm4() {
@@ -481,7 +502,7 @@ setup() {
}
trace() {
- [ $tracing -eq 0 ] && return
+ [ $TRACING -eq 0 ] && return
for arg do
[ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue
@@ -597,8 +618,8 @@ test_pmtu_ipvX() {
mtu "${ns_b}" veth_B-R2 1500
# Create route exceptions
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
# Check that exceptions have been created with the correct PMTU
pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -630,7 +651,7 @@ test_pmtu_ipvX() {
# Decrease remote MTU on path via R2, get new exception
mtu "${ns_r2}" veth_R2-B 400
mtu "${ns_b}" veth_B-R2 400
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2}
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
@@ -647,7 +668,7 @@ test_pmtu_ipvX() {
check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
# Get new exception
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2}
pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
}
@@ -696,7 +717,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst}
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -776,7 +797,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
- ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+ run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst}
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -834,13 +855,13 @@ test_pmtu_vti4_exception() {
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
- ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr}
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
# with the right PMTU value
- ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+ run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr}
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
@@ -856,7 +877,7 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
+ run_cmd ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr}
# Check that exception was created
pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -902,9 +923,9 @@ test_pmtu_vti6_default_mtu() {
test_pmtu_vti4_link_add_mtu() {
setup namespaces || return 2
- ${ns_a} ip link add vti4_a type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
+ run_cmd ${ns_a} ip link add vti4_a type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
[ $? -ne 0 ] && err " vti not supported" && return 2
- ${ns_a} ip link del vti4_a
+ run_cmd ${ns_a} ip link del vti4_a
fail=0
@@ -912,7 +933,7 @@ test_pmtu_vti4_link_add_mtu() {
max=$((65535 - 20))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
- ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
+ run_cmd ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
# This can fail, or MTU can be adjusted to a proper value
[ $? -ne 0 ] && continue
mtu="$(link_get_mtu "${ns_a}" vti4_a)"
@@ -920,14 +941,14 @@ test_pmtu_vti4_link_add_mtu() {
err " vti tunnel created with invalid MTU ${mtu}"
fail=1
fi
- ${ns_a} ip link del vti4_a
+ run_cmd ${ns_a} ip link del vti4_a
done
# Now check valid values
for v in ${min} 1300 ${max}; do
- ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
+ run_cmd ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10
mtu="$(link_get_mtu "${ns_a}" vti4_a)"
- ${ns_a} ip link del vti4_a
+ run_cmd ${ns_a} ip link del vti4_a
if [ "${mtu}" != "${v}" ]; then
err " vti MTU ${mtu} doesn't match configured value ${v}"
fail=1
@@ -940,9 +961,9 @@ test_pmtu_vti4_link_add_mtu() {
test_pmtu_vti6_link_add_mtu() {
setup namespaces || return 2
- ${ns_a} ip link add vti6_a type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
+ run_cmd ${ns_a} ip link add vti6_a type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
[ $? -ne 0 ] && err " vti6 not supported" && return 2
- ${ns_a} ip link del vti6_a
+ run_cmd ${ns_a} ip link del vti6_a
fail=0
@@ -950,7 +971,7 @@ test_pmtu_vti6_link_add_mtu() {
max=$((65535 - 40))
# Check invalid values first
for v in $((min - 1)) $((max + 1)); do
- ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10 2>/dev/null
+ run_cmd ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
# This can fail, or MTU can be adjusted to a proper value
[ $? -ne 0 ] && continue
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
@@ -958,14 +979,14 @@ test_pmtu_vti6_link_add_mtu() {
err " vti6 tunnel created with invalid MTU ${v}"
fail=1
fi
- ${ns_a} ip link del vti6_a
+ run_cmd ${ns_a} ip link del vti6_a
done
# Now check valid values
for v in 68 1280 1300 $((65535 - 40)); do
- ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
+ run_cmd ${ns_a} ip link add vti6_a mtu ${v} type vti6 local ${veth6_a_addr} remote ${veth6_b_addr} key 10
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
- ${ns_a} ip link del vti6_a
+ run_cmd ${ns_a} ip link del vti6_a
if [ "${mtu}" != "${v}" ]; then
err " vti6 MTU ${mtu} doesn't match configured value ${v}"
fail=1
@@ -978,19 +999,19 @@ test_pmtu_vti6_link_add_mtu() {
test_pmtu_vti6_link_change_mtu() {
setup namespaces || return 2
- ${ns_a} ip link add dummy0 mtu 1500 type dummy
+ run_cmd ${ns_a} ip link add dummy0 mtu 1500 type dummy
[ $? -ne 0 ] && err " dummy not supported" && return 2
- ${ns_a} ip link add dummy1 mtu 3000 type dummy
- ${ns_a} ip link set dummy0 up
- ${ns_a} ip link set dummy1 up
+ run_cmd ${ns_a} ip link add dummy1 mtu 3000 type dummy
+ run_cmd ${ns_a} ip link set dummy0 up
+ run_cmd ${ns_a} ip link set dummy1 up
- ${ns_a} ip addr add ${dummy6_0_addr}/${dummy6_mask} dev dummy0
- ${ns_a} ip addr add ${dummy6_1_addr}/${dummy6_mask} dev dummy1
+ run_cmd ${ns_a} ip addr add ${dummy6_0_prefix}1/${dummy6_mask} dev dummy0
+ run_cmd ${ns_a} ip addr add ${dummy6_1_prefix}1/${dummy6_mask} dev dummy1
fail=0
# Create vti6 interface bound to device, passing MTU, check it
- ${ns_a} ip link add vti6_a mtu 1300 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
+ run_cmd ${ns_a} ip link add vti6_a mtu 1300 type vti6 remote ${dummy6_0_prefix}2 local ${dummy6_0_prefix}1
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne 1300 ]; then
err " vti6 MTU ${mtu} doesn't match configured value 1300"
@@ -999,7 +1020,7 @@ test_pmtu_vti6_link_change_mtu() {
# Move to another device with different MTU, without passing MTU, check
# MTU is adjusted
- ${ns_a} ip link set vti6_a type vti6 remote ${dummy6_1_addr} local ${dummy6_1_addr}
+ run_cmd ${ns_a} ip link set vti6_a type vti6 remote ${dummy6_1_prefix}2 local ${dummy6_1_prefix}1
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne $((3000 - 40)) ]; then
err " vti MTU ${mtu} is not dummy MTU 3000 minus IPv6 header length"
@@ -1007,7 +1028,7 @@ test_pmtu_vti6_link_change_mtu() {
fi
# Move it back, passing MTU, check MTU is not overridden
- ${ns_a} ip link set vti6_a mtu 1280 type vti6 remote ${dummy6_0_addr} local ${dummy6_0_addr}
+ run_cmd ${ns_a} ip link set vti6_a mtu 1280 type vti6 remote ${dummy6_0_prefix}2 local ${dummy6_0_prefix}1
mtu="$(link_get_mtu "${ns_a}" vti6_a)"
if [ ${mtu} -ne 1280 ]; then
err " vti6 MTU ${mtu} doesn't match configured value 1280"
@@ -1052,7 +1073,7 @@ test_cleanup_vxlanX_exception() {
# Fill exception cache for multiple CPUs (2)
# we can always use inner IPv4 for that
for cpu in ${cpu_list}; do
- taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+ run_cmd taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr}
done
${ns_a} ip link del dev veth_A-R1 &
@@ -1084,29 +1105,33 @@ usage() {
exit 1
}
+################################################################################
+#
exitcode=0
desc=0
+
+while getopts :ptv o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=1;;
+ t) if which tcpdump > /dev/null 2>&1; then
+ TRACING=1
+ else
+ echo "=== tcpdump not available, tracing disabled"
+ fi
+ ;;
+ *) usage;;
+ esac
+done
+shift $(($OPTIND-1))
+
IFS="
"
-tracing=0
for arg do
- if [ "${arg}" != "${arg#--*}" ]; then
- opt="${arg#--}"
- if [ "${opt}" = "trace" ]; then
- if which tcpdump > /dev/null 2>&1; then
- tracing=1
- else
- echo "=== tcpdump not available, tracing disabled"
- fi
- else
- usage
- fi
- else
- # Check first that all requested tests are available before
- # running any
- command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
- fi
+ # Check first that all requested tests are available before running any
+ command -v > /dev/null "test_${arg}" || { echo "=== Test ${arg} not found"; usage; }
done
trap cleanup EXIT
@@ -1124,6 +1149,11 @@ for t in ${tests}; do
(
unset IFS
+
+ if [ "$VERBOSE" = "1" ]; then
+ printf "\n##########################################################################\n\n"
+ fi
+
eval test_${name}
ret=$?
cleanup
@@ -1132,6 +1162,11 @@ for t in ${tests}; do
printf "TEST: %-60s [ OK ]\n" "${t}"
elif [ $ret -eq 1 ]; then
printf "TEST: %-60s [FAIL]\n" "${t}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "Pausing. Hit enter to continue"
+ read a
+ fi
err_flush
exit 1
elif [ $ret -eq 2 ]; then
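
With the new getopts block, the script accepts -v (echo every command passed to run_cmd together with its output), -p (pause after a failing test), and -t (enable tcpdump tracing when available); test names may still follow the options. A usage sketch, with the test name taken from the functions defined above:

    ./pmtu.sh -v                          # run all tests, printing each command run_cmd executes
    ./pmtu.sh -p -t pmtu_vti6_exception   # single test, pause on failure, capture traces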
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index bd9b9632c72b..8c8c7d79c38d 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Google Inc.
* Author: Willem de Bruijn (willemb@google.com)
@@ -24,21 +25,6 @@
*
* Todo:
* - functionality: PACKET_FANOUT_FLAG_DEFRAG
- *
- * License (GPLv2):
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE /* for sched_setaffinity */
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 7d990d6c861b..faa884385c45 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013 Google Inc.
* Author: Willem de Bruijn <willemb@google.com>
* Daniel Borkmann <dborkman@redhat.com>
- *
- * License (GPLv2):
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef PSOCK_LIB_H
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
index 7ec4fa4d55dc..404a2ce759ab 100644
--- a/tools/testing/selftests/net/psock_tpacket.c
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013 Red Hat, Inc.
* Author: Daniel Borkmann <dborkman@redhat.com>
@@ -19,21 +20,6 @@
* - TPACKET_V1: RX_RING, TX_RING
* - TPACKET_V2: RX_RING, TX_RING
* - TPACKET_V3: RX_RING
- *
- * License (GPLv2):
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 78fc593dfe40..b25c9fe019d2 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -391,7 +391,7 @@ kci_test_encap_vxlan()
vlan="test-vlan0"
testns="$1"
- ip netns exec "$testns" ip link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
+ ip -netns "$testns" link add "$vxlan" type vxlan id 42 group 239.1.1.1 \
dev "$devdummy" dstport 4789 2>/dev/null
if [ $? -ne 0 ]; then
echo "FAIL: can't add vxlan interface, skipping test"
@@ -399,16 +399,68 @@ kci_test_encap_vxlan()
fi
check_err $?
- ip netns exec "$testns" ip addr add 10.2.11.49/24 dev "$vxlan"
+ ip -netns "$testns" addr add 10.2.11.49/24 dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link set up dev "$vxlan"
+ ip -netns "$testns" link set up dev "$vxlan"
check_err $?
- ip netns exec "$testns" ip link add link "$vxlan" name "$vlan" type vlan id 1
+ ip -netns "$testns" link add link "$vxlan" name "$vlan" type vlan id 1
check_err $?
- ip netns exec "$testns" ip link del "$vxlan"
+ # changelink testcases
+ ip -netns "$testns" link set dev "$vxlan" type vxlan vni 43 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan group ffe5::5 dev "$devdummy" 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl inherit 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan ttl 64
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan nolearning
+ check_err $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan proxy 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan norsc 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l2miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan l3miss 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan external 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udpcsum 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan udp6zerocsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumtx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan remcsumrx 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gbp 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link set dev "$vxlan" type vxlan gpe 2>/dev/null
+ check_fail $?
+
+ ip -netns "$testns" link del "$vxlan"
check_err $?
if [ $ret -ne 0 ]; then
@@ -430,19 +482,19 @@ kci_test_encap_fou()
return $ksft_skip
fi
- ip netns exec "$testns" ip fou add port 7777 ipproto 47 2>/dev/null
+ ip -netns "$testns" fou add port 7777 ipproto 47 2>/dev/null
if [ $? -ne 0 ];then
echo "FAIL: can't add fou port 7777, skipping test"
return 1
fi
- ip netns exec "$testns" ip fou add port 8888 ipproto 4
+ ip -netns "$testns" fou add port 8888 ipproto 4
check_err $?
- ip netns exec "$testns" ip fou del port 9999 2>/dev/null
+ ip -netns "$testns" fou del port 9999 2>/dev/null
check_fail $?
- ip netns exec "$testns" ip fou del port 7777
+ ip -netns "$testns" fou del port 7777
check_err $?
if [ $ret -ne 0 ]; then
@@ -465,12 +517,12 @@ kci_test_encap()
return $ksft_skip
fi
- ip netns exec "$testns" ip link set lo up
+ ip -netns "$testns" link set lo up
check_err $?
- ip netns exec "$testns" ip link add name "$devdummy" type dummy
+ ip -netns "$testns" link add name "$devdummy" type dummy
check_err $?
- ip netns exec "$testns" ip link set "$devdummy" up
+ ip -netns "$testns" link set "$devdummy" up
check_err $?
kci_test_encap_vxlan "$testns"
@@ -644,9 +696,9 @@ kci_test_ipsec_offload()
algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128"
srcip=192.168.123.3
dstip=192.168.123.4
- dev=simx1
- sysfsd=/sys/kernel/debug/netdevsim/$dev
+ sysfsd=/sys/kernel/debug/netdevsim/netdevsim0/ports/0/
sysfsf=$sysfsd/ipsec
+ sysfsnet=/sys/bus/netdevsim/devices/netdevsim0/net/
# setup netdevsim since dummydev doesn't have offload support
modprobe netdevsim
@@ -656,7 +708,11 @@ kci_test_ipsec_offload()
return 1
fi
- ip link add $dev type netdevsim
+ echo "0" > /sys/bus/netdevsim/new_device
+ while [ ! -d $sysfsnet ] ; do :; done
+ udevadm settle
+ dev=`ls $sysfsnet`
+
ip addr add $srcip dev $dev
ip link set $dev up
if [ ! -d $sysfsd ] ; then
@@ -729,7 +785,6 @@ EOF
fi
# clean up any leftovers
- ip link del $dev
rmmod netdevsim
if [ $ret -ne 0 ]; then
@@ -759,24 +814,24 @@ kci_test_gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap seq \
key 102 local 172.16.1.100 remote 172.16.1.200
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -809,24 +864,24 @@ kci_test_ip6gretap()
fi
# test native tunnel
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap seq \
key 102 local fc00:100::1 remote fc00:100::2
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+ ip -netns "$testns" addr add dev "$DEV_NS" fc00:200::1/96
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6gretap external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -858,40 +913,40 @@ kci_test_erspan()
fi
# test native tunnel erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan seq \
key 102 local 172.16.1.100 remote 172.16.1.200 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+ ip -netns "$testns" link add dev "$DEV_NS" type erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
@@ -923,41 +978,41 @@ kci_test_ip6erspan()
fi
# test native tunnel ip6erspan v1
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 1 erspan 488
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test native tunnel ip6erspan v2
- ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+ ip -netns "$testns" link add dev "$DEV_NS" type ip6erspan seq \
key 102 local fc00:100::1 remote fc00:100::2 \
erspan_ver 2 erspan_dir ingress erspan_hwid 7
check_err $?
- ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+ ip -netns "$testns" addr add dev "$DEV_NS" 10.1.1.100/24
check_err $?
- ip netns exec "$testns" ip link set dev $DEV_NS up
+ ip -netns "$testns" link set dev $DEV_NS up
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
# test external mode
- ip netns exec "$testns" ip link add dev "$DEV_NS" \
+ ip -netns "$testns" link add dev "$DEV_NS" \
type ip6erspan external
check_err $?
- ip netns exec "$testns" ip link del "$DEV_NS"
+ ip -netns "$testns" link del "$DEV_NS"
check_err $?
if [ $ret -ne 0 ]; then
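
The conversions above replace 'ip netns exec "$testns" ip ...' with 'ip -netns "$testns" ...'; both forms perform the same operation, the latter simply avoids forking a second ip process inside the namespace. A sketch of the equivalence (namespace name illustrative):

    ip netns exec testns ip link set lo up   # old form: run ip inside the namespace
    ip -netns testns link set lo up          # new form: same result, single process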
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
index 2dc95fda7ef7..ea5938ec009a 100755
--- a/tools/testing/selftests/net/run_afpackettests
+++ b/tools/testing/selftests/net/run_afpackettests
@@ -6,12 +6,14 @@ if [ $(id -u) != 0 ]; then
exit 0
fi
+ret=0
echo "--------------------"
echo "running psock_fanout test"
echo "--------------------"
./in_netns.sh ./psock_fanout
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
@@ -22,6 +24,7 @@ echo "--------------------"
./in_netns.sh ./psock_tpacket
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
@@ -32,6 +35,8 @@ echo "--------------------"
./in_netns.sh ./txring_overwrite
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ ret=1
else
echo "[PASS]"
fi
+exit $ret
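
The script now records failures in ret instead of always exiting 0, so kselftest can see them while the remaining tests still run. The underlying pattern, as a generic sketch (test names are placeholders):

    ret=0
    ./first_test  || ret=1   # remember the failure ...
    ./second_test || ret=1   # ... but keep running the other tests
    exit $ret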
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index b093f39c298c..14e41faf2c57 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -7,7 +7,7 @@ echo "--------------------"
./socket
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ exit 1
else
echo "[PASS]"
fi
-
diff --git a/tools/testing/selftests/net/tcp_inq.c b/tools/testing/selftests/net/tcp_inq.c
index d044b29ddabc..bd6a9c7a3e8a 100644
--- a/tools/testing/selftests/net/tcp_inq.c
+++ b/tools/testing/selftests/net/tcp_inq.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2018 Google Inc.
* Author: Soheil Hassas Yeganeh (soheil@google.com)
*
* Simple example on how to use TCP_INQ and TCP_CM_INQ.
- *
- * License (GPLv2):
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index e8c5dff448eb..31ced79f4f25 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2018 Google Inc.
* Author: Eric Dumazet (edumazet@google.com)
@@ -44,21 +45,6 @@
* cpu usage user:0.046 sys:3.559, 110.016 usec per MB, 65529 c-switches
* received 32768 MB (99.9939 % mmap'ed) in 7.43764 s, 36.9577 Gbit
* cpu usage user:0.035 sys:3.467, 106.873 usec per MB, 65530 c-switches
- *
- * License (GPLv2):
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE
#include <pthread.h>
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index fac68d710f35..278c86134556 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -42,7 +42,7 @@ FIXTURE_SETUP(tls)
len = sizeof(addr);
memset(&tls12, 0, sizeof(tls12));
- tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.version = TLS_1_3_VERSION;
tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
addr.sin_family = AF_INET;
@@ -442,6 +442,21 @@ TEST_F(tls, multiple_send_single_recv)
EXPECT_EQ(memcmp(send_mem, recv_mem + send_len, send_len), 0);
}
+TEST_F(tls, single_send_multiple_recv_non_align)
+{
+ const unsigned int total_len = 15;
+ const unsigned int recv_len = 10;
+ char recv_mem[recv_len * 2];
+ char send_mem[total_len];
+
+ EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
+ memset(recv_mem, 0, total_len);
+
+ EXPECT_EQ(recv(self->cfd, recv_mem, recv_len, 0), recv_len);
+ EXPECT_EQ(recv(self->cfd, recv_mem + recv_len, recv_len, 0), 5);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, total_len), 0);
+}
+
TEST_F(tls, recv_partial)
{
char const *test_str = "test_read_partial";
@@ -452,10 +467,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), 0), -1);
+ EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), -1);
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
@@ -565,14 +582,33 @@ TEST_F(tls, recv_peek_large_buf_mult_recs)
len = strlen(test_str_second) + 1;
EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
- len = sizeof(buf);
+ len = strlen(test_str) + 1;
memset(buf, 0, len);
- EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
-
+ EXPECT_NE((len = recv(self->cfd, buf, len,
+ MSG_PEEK | MSG_WAITALL)), -1);
len = strlen(test_str) + 1;
EXPECT_EQ(memcmp(test_str, buf, len), 0);
}
+TEST_F(tls, recv_lowat)
+{
+ char send_mem[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+ char recv_mem[20];
+ int lowat = 8;
+
+ EXPECT_EQ(send(self->fd, send_mem, 10, 0), 10);
+ EXPECT_EQ(send(self->fd, send_mem, 5, 0), 5);
+
+ memset(recv_mem, 0, 20);
+ EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVLOWAT,
+ &lowat, sizeof(lowat)), 0);
+ EXPECT_EQ(recv(self->cfd, recv_mem, 1, MSG_WAITALL), 1);
+ EXPECT_EQ(recv(self->cfd, recv_mem + 1, 6, MSG_WAITALL), 6);
+ EXPECT_EQ(recv(self->cfd, recv_mem + 7, 10, 0), 8);
+
+ EXPECT_EQ(memcmp(send_mem, recv_mem, 10), 0);
+ EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
+}
TEST_F(tls, pollin)
{
@@ -751,6 +787,20 @@ TEST_F(tls, control_msg)
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
vec.iov_base = buf;
+ EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ record_type = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ /* Recv the message again without MSG_PEEK */
+ record_type = 0;
+ memset(buf, 0, sizeof(buf));
+
EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
cmsg = CMSG_FIRSTHDR(&msg);
EXPECT_NE(cmsg, NULL);
@@ -761,4 +811,140 @@ TEST_F(tls, control_msg)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST(keysizes) {
+ struct tls12_crypto_info_aes_gcm_256 tls12;
+ struct sockaddr_in addr;
+ int sfd, ret, fd, cfd;
+ socklen_t len;
+ bool notls;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ close(sfd);
+ close(fd);
+ close(cfd);
+}
+
+TEST(tls12) {
+ int fd, cfd;
+ bool notls;
+
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
+ notls = false;
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ }
+
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(cfd, 0);
+
+ if (!notls) {
+ ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
+ sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ close(sfd);
+
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ send_len = strlen(test_str) + 1;
+ EXPECT_EQ(send(fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ close(fd);
+ close(cfd);
+}
+
TEST_HARNESS_MAIN
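
The fixture now negotiates TLS_1_3_VERSION by default, while the standalone tls12 and keysizes cases cover the TLS 1.2 and AES-GCM-256 setsockopt paths, and the new recv tests exercise MSG_WAITALL, MSG_PEEK and SO_RCVLOWAT. A sketch for building and running the harness (assuming a kernel with CONFIG_TLS enabled):

    make -C tools/testing/selftests TARGETS=net
    cd tools/testing/selftests/net && ./tls    # runs every TEST/TEST_F case in this file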
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index e279051bc631..b8265ee9923f 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -17,7 +17,6 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index c9ff2b47bd1c..4144984ebee5 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
+ conntrack_icmp_related.sh nft_flowtable.sh
include ../lib.mk
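
Adding the scripts to TEST_PROGS lets the common lib.mk targets pick them up, so they can be run through the kselftest wrapper or invoked directly. A usage sketch:

    make -C tools/testing/selftests TARGETS=netfilter run_tests
    # or run one script directly:
    cd tools/testing/selftests/netfilter && ./bridge_brouter.sh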
diff --git a/tools/testing/selftests/netfilter/bridge_brouter.sh b/tools/testing/selftests/netfilter/bridge_brouter.sh
new file mode 100755
index 000000000000..29f3955b9af7
--- /dev/null
+++ b/tools/testing/selftests/netfilter/bridge_brouter.sh
@@ -0,0 +1,146 @@
+#!/bin/bash
+#
+# This test is for bridge 'brouting', i.e. making some packets be routed
+# rather than bridged even though they arrive on an interface that is
+# part of a bridge.
+
+# eth0 br0 eth0
+# setup is: ns1 <-> ns0 <-> ns2
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+ebtables -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ebtables"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+if [ $? -ne 0 ]; then
+ echo "SKIP: Can't create veth device"
+ exit $ksft_skip
+fi
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 link set veth1 up
+
+ip -net ns0 link add br0 type bridge
+if [ $? -ne 0 ]; then
+ echo "SKIP: Can't create bridge br0"
+ exit $ksft_skip
+fi
+
+ip -net ns0 link set veth0 master br0
+ip -net ns0 link set veth1 master br0
+ip -net ns0 link set br0 up
+ip -net ns0 addr add 10.0.0.1/24 dev br0
+
+# place both in the same subnet; ns1 and ns2 are connected via ns0:br0
+for i in 1 2; do
+ ip -net ns$i link set lo up
+ ip -net ns$i link set eth0 up
+ ip -net ns$i addr add 10.0.0.1$i/24 dev eth0
+done
+
+test_ebtables_broute()
+{
+ local cipt
+
+ # redirect is needed so the dst mac is rewritten to the bridge itself;
+ # otherwise the ip stack won't process OTHERHOST (foreign unicast mac) packets.
+ ip netns exec ns0 ebtables -t broute -A BROUTING -p ipv4 --ip-protocol icmp -j redirect --redirect-target=DROP
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add ebtables broute redirect rule"
+ return $ksft_skip
+ fi
+
+ # ping ns2 from ns1, expected to not work (ip forwarding is off)
+ ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "ERROR: ping works, should have failed" 1>&2
+ return 1
+ fi
+
+ # enable forwarding on both interfaces.
+ # neither needs an ip address, but at least the bridge needs
+ # an ip address in the same network segment as ns1 and ns2 (ns0
+ # needs to be able to determine a route for the to-be-forwarded packet).
+ ip netns exec ns0 sysctl -q net.ipv4.conf.veth0.forwarding=1
+ ip netns exec ns0 sysctl -q net.ipv4.conf.veth1.forwarding=1
+
+ sleep 1
+
+ ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "ERROR: ping did not work, but it should (broute+forward)" 1>&2
+ return 1
+ fi
+
+ echo "PASS: ns1/ns2 connectivity with active broute rule"
+ ip netns exec ns0 ebtables -t broute -F
+
+ # ping ns2 from ns1, expected to work (frames are bridged)
+ ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "ERROR: ping did not work, but it should (bridged)" 1>&2
+ return 1
+ fi
+
+ ip netns exec ns0 ebtables -t filter -A FORWARD -p ipv4 --ip-protocol icmp -j DROP
+
+ # ping ns2 from ns1, expected to not work (DROP in bridge forward)
+ ip netns exec ns1 ping -q -c 1 10.0.0.12 > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "ERROR: ping works, should have failed (icmp forward drop)" 1>&2
+ return 1
+ fi
+
+ # re-activate brouter
+ ip netns exec ns0 ebtables -t broute -A BROUTING -p ipv4 --ip-protocol icmp -j redirect --redirect-target=DROP
+
+ ip netns exec ns2 ping -q -c 1 10.0.0.11 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "ERROR: ping did not work, but it should (broute+forward 2)" 1>&2
+ return 1
+ fi
+
+ echo "PASS: ns1/ns2 connectivity with active broute rule and bridge forward drop"
+ return 0
+}
+
+# test basic connectivity
+ip netns exec ns1 ping -c 1 -q 10.0.0.12 > /dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not reach ns2 from ns1" 1>&2
+ ret=1
+fi
+
+ip netns exec ns2 ping -c 1 -q 10.0.0.11 > /dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: Could not reach ns1 from ns2" 1>&2
+ ret=1
+fi
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns connectivity: ns1 and ns2 can reach each other"
+fi
+
+test_ebtables_broute
+ret=$?
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
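
The broute BROUTING rule with 'redirect --redirect-target=DROP' is what pulls the frame out of the bridge data path and hands it to the IP stack, which is why plain bridging stops working while the rule is loaded. A hedged sketch for inspecting the relevant state while the test runs:

    ip netns exec ns0 ebtables -t broute -L                    # show the active BROUTING rule
    ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding    # 1 while routed pings are expected to work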
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755
index 000000000000..b48e1833bc89
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig icmp errors are set as RELATED
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack set RELATED
+# state on the 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 performs IP masquerading, i.e. we also
+# check that the icmp errors are propagated to the correct host as per
+# the nat of the "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup() {
+ for i in 1 2;do ip netns del nsclient$i;done
+ for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+ echo -n 192.168.$1.2
+}
+
+ipv6 () {
+ echo -n dead:$1::2
+}
+
+check_counter()
+{
+ ns=$1
+ name=$2
+ expect=$3
+ local lret=0
+
+ cnt=$(ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter "$name" 1>&2
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_unknown()
+{
+ expect="packets 0 bytes 0"
+ for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ check_counter $n "unknown" "$expect"
+ if [ $? -ne 0 ] ;then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ ip netns add $n
+ ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+ ip -net nsclient$i link set $DEV up
+ ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+ ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+ ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+ ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+ meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+ meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+ meta l4proto { icmp, icmpv6 } ct state new,established accept
+ counter name "unknown" drop
+ }
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+ counter name "unknown" drop
+ }
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter new { }
+ counter established { }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+ counter name "unknown" drop
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+ counter name "unknown" drop
+ }
+}
+EOF
+
+
+# make sure NAT core rewrites address of icmp error if nat is used according to
+# conntrack nat information (icmp error will be directed at nsrouter1 address,
+# but it needs to be routed to nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip protocol icmp oifname "veth0" counter masquerade
+ }
+}
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+ }
+}
+EOF
+
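+# lower the MTU of the nsrouter2 <-> nsclient2 link so that the large pings
+# below trigger 'fragmentation needed' / 'packet too big' errors.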
+ip netns exec nsrouter2 ip link set eth1 mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ip routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+# nsrouter2 should have generated the icmp error itself, so its
+# related counter should be 0 (the counter rule is in the forward chain,
+# which locally generated packets do not traverse).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: icmp mtu error had RELATED state"
+else
+ echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
new file mode 100755
index 000000000000..fe52488a6f72
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -0,0 +1,324 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This tests basic flowtable functionality.
+# Creates following topology:
+#
+# Originator (MTU 9000) <-Router1-> MTU 1500 <-Router2-> Responder (MTU 2000)
+# Router1 is the one doing flow offloading, Router2 has no special
+# purpose other than having a link MTU that is smaller than both the
+# Originator's and the Responder's, i.e. the announced TCP MSS values are too
+# large and will still result in fragmentation and/or PMTU discovery.
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+ns1in=""
+ns2in=""
+ns1out=""
+ns2out=""
+
+log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+which nc > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nc (netcat)"
+ exit $ksft_skip
+fi
+
+ip netns add nsr1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace"
+ exit $ksft_skip
+fi
+
+ip netns add ns1
+ip netns add ns2
+
+ip netns add nsr2
+
+cleanup() {
+ for i in 1 2; do
+ ip netns del ns$i
+ ip netns del nsr$i
+ done
+
+ rm -f "$ns1in" "$ns1out"
+ rm -f "$ns2in" "$ns2out"
+
+ [ $log_netns -eq 0 ] && sysctl -q net.netfilter.nf_log_all_netns=$log_netns
+}
+
+trap cleanup EXIT
+
+sysctl -q net.netfilter.nf_log_all_netns=1
+
+ip link add veth0 netns nsr1 type veth peer name eth0 netns ns1
+ip link add veth1 netns nsr1 type veth peer name veth0 netns nsr2
+
+ip link add veth1 netns nsr2 type veth peer name eth0 netns ns2
+
+for dev in lo veth0 veth1; do
+ for i in 1 2; do
+ ip -net nsr$i link set $dev up
+ done
+done
+
+ip -net nsr1 addr add 10.0.1.1/24 dev veth0
+ip -net nsr1 addr add dead:1::1/64 dev veth0
+
+ip -net nsr2 addr add 10.0.2.1/24 dev veth1
+ip -net nsr2 addr add dead:2::1/64 dev veth1
+
+# Set different MTUs so that packets going from ns1 (large MTU) to ns2
+# (smaller MTU) must be handed to the stack: either for fragmentation
+# (ip_no_pmtu_disc=1) or for PMTU discovery (ICMP error sent back to the
+# originator).
+# ns2 is reached via nsr2, which has the smaller mtu, so the TCP MSS announced
+# by both peers is NOT the lowest link mtu.
+
+ip -net nsr1 link set veth0 mtu 9000
+ip -net ns1 link set eth0 mtu 9000
+
+ip -net nsr2 link set veth1 mtu 2000
+ip -net ns2 link set eth0 mtu 2000
+
+# transfer-net between nsr1 and nsr2.
+# these addresses are not used for connections.
+ip -net nsr1 addr add 192.168.10.1/24 dev veth1
+ip -net nsr1 addr add fee1:2::1/64 dev veth1
+
+ip -net nsr2 addr add 192.168.10.2/24 dev veth0
+ip -net nsr2 addr add fee1:2::2/64 dev veth0
+
+for i in 1 2; do
+ ip netns exec nsr$i sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec nsr$i sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip -net ns$i link set lo up
+ ip -net ns$i link set eth0 up
+ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+ ip -net ns$i route add default via 10.0.$i.1
+ ip -net ns$i addr add dead:$i::99/64 dev eth0
+ ip -net ns$i route add default via dead:$i::1
+ ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null
+
+ # don't set ip DF bit for first two tests
+ ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
+done
+
+ip -net nsr1 route add default via 192.168.10.2
+ip -net nsr2 route add default via 192.168.10.1
+
+ip netns exec nsr1 nft -f - <<EOF
+table inet filter {
+ flowtable f1 {
+ hook ingress priority 0
+ devices = { veth0, veth1 }
+ }
+
+ chain forward {
+ type filter hook forward priority 0; policy drop;
+
+ # flow offloaded? Tag ct with mark 1, so we can detect when it fails.
+ meta oif "veth1" tcp dport 12345 flow offload @f1 counter
+
+ # use packet size to trigger 'should be offloaded by now'.
+ # otherwise, if the 'flow offload' expression never offloads anything,
+ # the test would still pass.
+ tcp dport 12345 meta length gt 200 ct mark set 1 counter
+
+ # this turns off flow offloading internally, so expect packets again
+ tcp flags fin,rst ct mark set 0 accept
+
+ # this allows large packets from responder, we need this as long
+ # as PMTUd is off.
+ # This rule is deleted for the last test, when we expect PMTUd
+ # to kick in and ensure all packets meet mtu requirements.
+ meta length gt 1500 accept comment something-to-grep-for
+
+ # next line blocks the connection without a working offload.
+ # we only do this for reverse dir, because we expect packets to
+ # enter slow path due to MTU mismatch of veth0 and veth1.
+ tcp sport 12345 ct mark 1 counter log prefix "mark failure " drop
+
+ ct state established,related accept
+
+ # for packets that we can't offload yet, i.e. SYN (any ct that is not confirmed)
+ meta length lt 200 oif "veth1" tcp dport 12345 counter accept
+
+ meta nfproto ipv4 meta l4proto icmp accept
+ meta nfproto ipv6 meta l4proto icmpv6 accept
+ }
+}
+EOF
+
+if [ $? -ne 0 ]; then
+ echo "SKIP: Could not load nft ruleset"
+ exit $ksft_skip
+fi
+
+# test basic connectivity
+ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null
+if [ $? -ne 0 ];then
+ echo "ERROR: ns1 cannot reach ns2" 1>&2
+ exit 1
+fi
+
+ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null
+if [ $? -ne 0 ];then
+ echo "ERROR: ns2 cannot reach ns1" 1>&2
+ exit 1
+fi
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns routing/connectivity: ns1 can reach ns2"
+fi
+
+ns1in=$(mktemp)
+ns1out=$(mktemp)
+ns2in=$(mktemp)
+ns2out=$(mktemp)
+
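+# fill $1 with a random number of KiB of random data, plus a short unaligned
+# tail, so the transferred size varies between runs.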
+make_file()
+{
+ name=$1
+ who=$2
+
+ SIZE=$((RANDOM % (1024 * 8)))
+ TSIZE=$((SIZE * 1024))
+
+ dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
+
+ SIZE=$((RANDOM % 1024))
+ SIZE=$((SIZE + 128))
+ TSIZE=$((TSIZE + SIZE))
+ dd if=/dev/urandom conv=notrunc oflag=append of="$name" bs=1 count=$SIZE 2> /dev/null
+}
+
+check_transfer()
+{
+ in=$1
+ out=$2
+ what=$3
+
+ cmp "$in" "$out" > /dev/null 2>&1
+ if [ $? -ne 0 ] ;then
+ echo "FAIL: file mismatch for $what" 1>&2
+ ls -l "$in"
+ ls -l "$out"
+ return 1
+ fi
+
+ return 0
+}
+
+test_tcp_forwarding()
+{
+ local nsa=$1
+ local nsb=$2
+ local lret=0
+
+ ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" &
+ lpid=$!
+
+ sleep 1
+ ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" &
+ cpid=$!
+
+ sleep 3
+
+ kill $lpid
+ kill $cpid
+ wait
+
+ check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"
+ if [ $? -ne 0 ];then
+ lret=1
+ fi
+
+ check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"
+ if [ $? -ne 0 ];then
+ lret=1
+ fi
+
+ return $lret
+}
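+# usage: test_tcp_forwarding <client-ns> <server-ns>; returns 0 only if the
+# files were received intact in both directions.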
+
+make_file "$ns1in" "ns1"
+make_file "$ns2in" "ns2"
+
+# First test:
+# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
+test_tcp_forwarding ns1 ns2
+if [ $? -eq 0 ] ;then
+ echo "PASS: flow offloaded for ns1/ns2"
+else
+ echo "FAIL: flow offload for ns1/ns2:" 1>&2
+ ip netns exec nsr1 nft list ruleset
+ ret=1
+fi
+
+# delete the default route, i.e. ns2 won't be able to reach ns1 and
+# will depend on ns1 being masqueraded in nsr1.
+# ns2 is expected to see ns1's traffic with nsr1's address.
+ip -net ns2 route del default via 10.0.2.1
+ip -net ns2 route del default via dead:2::1
+ip -net ns2 route add 192.168.10.1 via 10.0.2.1
+
+# Second test:
+# Same, but with NAT enabled.
+ip netns exec nsr1 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oifname "veth1" masquerade
+ }
+}
+EOF
+
+test_tcp_forwarding ns1 ns2
+
+if [ $? -eq 0 ] ;then
+ echo "PASS: flow offloaded for ns1/ns2 with NAT"
+else
+ echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2
+ ip netns exec nsr1 nft list ruleset
+ ret=1
+fi
+
+# Third test:
+# Same as second test, but with PMTU discovery enabled.
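+# Remove the 'large packets accept' rule (located via its comment) so oversized
+# packets are no longer waved through and PMTU discovery has to do its job.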
+handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2)
+
+ip netns exec nsr1 nft delete rule inet filter forward $handle
+if [ $? -ne 0 ] ;then
+ echo "FAIL: Could not delete large-packet accept rule"
+ exit 1
+fi
+
+ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
+
+test_tcp_forwarding ns1 ns2
+if [ $? -eq 0 ] ;then
+ echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
+else
+ echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
+ ip netns exec nsr1 nft list ruleset
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index 8ec76681605c..1be55e705780 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -6,6 +6,12 @@
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
ret=0
+test_inet_nat=true
+
+cleanup()
+{
+ for i in 0 1 2; do ip netns del ns$i;done
+}
nft --version > /dev/null 2>&1
if [ $? -ne 0 ];then
@@ -20,10 +26,21 @@ if [ $? -ne 0 ];then
fi
ip netns add ns0
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
ip netns add ns1
ip netns add ns2
-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
ip -net ns0 link set lo up
@@ -141,17 +158,24 @@ reset_counters()
test_local_dnat6()
{
+ local family=$1
local lret=0
+ local IPF=""
+
+ if [ $family = "inet" ];then
+ IPF="ip6"
+ fi
+
ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
chain output {
type nat hook output priority 0; policy accept;
- ip6 daddr dead:1::99 dnat to dead:2::99
+ ip6 daddr dead:1::99 dnat $IPF to dead:2::99
}
}
EOF
if [ $? -ne 0 ]; then
- echo "SKIP: Could not add add ip6 dnat hook"
+ echo "SKIP: Could not add add $family dnat hook"
return $ksft_skip
fi
@@ -201,7 +225,7 @@ EOF
fi
done
- test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was $family NATted to ns2"
ip netns exec ns0 nft flush chain ip6 nat output
return $lret
@@ -209,15 +233,32 @@ EOF
test_local_dnat()
{
+ local family=$1
local lret=0
-ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+ local IPF=""
+
+ if [ $family = "inet" ];then
+ IPF="ip"
+ fi
+
+ip netns exec ns0 nft -f - <<EOF 2>/dev/null
+table $family nat {
chain output {
type nat hook output priority 0; policy accept;
- ip daddr 10.0.1.99 dnat to 10.0.2.99
+ ip daddr 10.0.1.99 dnat $IPF to 10.0.2.99
}
}
EOF
+ if [ $? -ne 0 ]; then
+ if [ $family = "inet" ];then
+ echo "SKIP: inet nat tests"
+ test_inet_nat=false
+ return $ksft_skip
+ fi
+ echo "SKIP: Could not add add $family dnat hook"
+ return $ksft_skip
+ fi
+
# ping netns1, expect rewrite to netns2
ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
if [ $? -ne 0 ]; then
@@ -264,9 +305,9 @@ EOF
fi
done
- test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+ test $lret -eq 0 && echo "PASS: ping to ns1 was $family NATted to ns2"
- ip netns exec ns0 nft flush chain ip nat output
+ ip netns exec ns0 nft flush chain $family nat output
reset_counters
ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
@@ -313,7 +354,7 @@ EOF
fi
done
- test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+ test $lret -eq 0 && echo "PASS: ping to ns1 OK after $family nat output chain flush"
return $lret
}
@@ -321,6 +362,8 @@ EOF
test_masquerade6()
{
+ local family=$1
+ local natflags=$2
local lret=0
ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -351,23 +394,27 @@ test_masquerade6()
# add masquerading rule
ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
chain postrouting {
type nat hook postrouting priority 0; policy accept;
- meta oif veth0 masquerade
+ meta oif veth0 masquerade $natflags
}
}
EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add $family masquerade hook"
+ return $ksft_skip
+ fi
+
ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+ echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags"
lret=1
fi
# ns1 should have seen packets from ns0, due to masquerade
expect="packets 1 bytes 104"
for dir in "in6" "out6" ; do
-
cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
if [ $? -ne 0 ]; then
bad_counter ns1 ns0$dir "$expect"
@@ -397,19 +444,27 @@ EOF
fi
done
- ip netns exec ns0 nft flush chain ip6 nat postrouting
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)"
+ lret=1
+ fi
+
+ ip netns exec ns0 nft flush chain $family nat postrouting
if [ $? -ne 0 ]; then
- echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+ echo "ERROR: Could not flush $family nat postrouting" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+ test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for ns2"
return $lret
}
test_masquerade()
{
+ local family=$1
+ local natflags=$2
local lret=0
ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -417,7 +472,7 @@ test_masquerade()
ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: canot ping ns1 from ns2"
+ echo "ERROR: cannot ping ns1 from ns2 $natflags"
lret=1
fi
@@ -440,16 +495,21 @@ test_masquerade()
# add masquerading rule
ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+table $family nat {
chain postrouting {
type nat hook postrouting priority 0; policy accept;
- meta oif veth0 masquerade
+ meta oif veth0 masquerade $natflags
}
}
EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add $family masquerade hook"
+ return $ksft_skip
+ fi
+
ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+ echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags"
lret=1
fi
@@ -485,19 +545,26 @@ EOF
fi
done
- ip netns exec ns0 nft flush chain ip nat postrouting
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)"
+ lret=1
+ fi
+
+ ip netns exec ns0 nft flush chain $family nat postrouting
if [ $? -ne 0 ]; then
- echo "ERROR: Could not flush nat postrouting" 1>&2
+ echo "ERROR: Could not flush $family nat postrouting" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+ test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for ns2"
return $lret
}
test_redirect6()
{
+ local family=$1
local lret=0
ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -527,16 +594,21 @@ test_redirect6()
# add redirect rule
ip netns exec ns0 nft -f - <<EOF
-table ip6 nat {
+table $family nat {
chain prerouting {
type nat hook prerouting priority 0; policy accept;
meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
}
}
EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add $family redirect hook"
+ return $ksft_skip
+ fi
+
ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+ echo "ERROR: cannot ping ns1 from ns2 via ipv6 with active $family redirect"
lret=1
fi
@@ -560,19 +632,20 @@ EOF
fi
done
- ip netns exec ns0 nft delete table ip6 nat
+ ip netns exec ns0 nft delete table $family nat
if [ $? -ne 0 ]; then
- echo "ERROR: Could not delete ip6 nat table" 1>&2
+ echo "ERROR: Could not delete $family nat table" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+ test $lret -eq 0 && echo "PASS: $family IPv6 redirection for ns2"
return $lret
}
test_redirect()
{
+ local family=$1
local lret=0
ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
@@ -603,16 +676,21 @@ test_redirect()
# add redirect rule
ip netns exec ns0 nft -f - <<EOF
-table ip nat {
+table $family nat {
chain prerouting {
type nat hook prerouting priority 0; policy accept;
meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
}
}
EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add $family redirect hook"
+ return $ksft_skip
+ fi
+
ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
if [ $? -ne 0 ] ; then
- echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+ echo "ERROR: cannot ping ns1 from ns2 with active $family ip redirect"
lret=1
fi
@@ -637,13 +715,13 @@ EOF
fi
done
- ip netns exec ns0 nft delete table ip nat
+ ip netns exec ns0 nft delete table $family nat
if [ $? -ne 0 ]; then
- echo "ERROR: Could not delete nat table" 1>&2
+ echo "ERROR: Could not delete $family nat table" 1>&2
lret=1
fi
- test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+ test $lret -eq 0 && echo "PASS: $family IP redirection for ns2"
return $lret
}
@@ -746,17 +824,26 @@ if [ $ret -eq 0 ];then
fi
reset_counters
-test_local_dnat
-test_local_dnat6
-
+test_local_dnat ip
+test_local_dnat6 ip6
reset_counters
-test_masquerade
-test_masquerade6
+$test_inet_nat && test_local_dnat inet
+$test_inet_nat && test_local_dnat6 inet
+for flags in "" "fully-random"; do
+reset_counters
+test_masquerade ip $flags
+test_masquerade6 ip6 $flags
reset_counters
-test_redirect
-test_redirect6
+$test_inet_nat && test_masquerade inet $flags
+$test_inet_nat && test_masquerade6 inet $flags
+done
-for i in 0 1 2; do ip netns del ns$i;done
+reset_counters
+test_redirect ip
+test_redirect6 ip6
+reset_counters
+$test_inet_nat && test_redirect inet
+$test_inet_nat && test_redirect6 inet
exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index dd4162fc0419..6dee9e636a95 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h>
#include <sys/time.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c
index 5cdfd743447b..0fbed67bf4f6 100644
--- a/tools/testing/selftests/networking/timestamping/timestamping.c
+++ b/tools/testing/selftests/networking/timestamping/timestamping.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* This program demonstrates how the various time stamping features in
* the Linux kernel work. It emulates the behavior of a PTP
@@ -14,19 +15,6 @@
*
* Copyright (C) 2009 Intel Corporation.
* Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index d1bbafb16f47..7e386be47120 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014 Google Inc.
* Author: willemb@google.com (Willem de Bruijn)
@@ -14,20 +15,6 @@
*
* This test requires a dummy TCP server.
* A simple `nc6 [-u] -l -p $DESTPORT` will do
- *
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/nsfs/Makefile b/tools/testing/selftests/nsfs/Makefile
index 9ff7c7f80625..dd9bd50b7b93 100644
--- a/tools/testing/selftests/nsfs/Makefile
+++ b/tools/testing/selftests/nsfs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
TEST_GEN_PROGS := owner pidns
CFLAGS := -Wall -Werror
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 08cbfbbc7029..8a20e03d4cb7 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -1,16 +1,7 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2016 Microsemi. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
# Author: Logan Gunthorpe <logang@deltatee.com>
REMOTE_HOST=
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
new file mode 100644
index 000000000000..822a1e63d045
--- /dev/null
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -0,0 +1 @@
+pidfd_test
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
new file mode 100644
index 000000000000..443fedbd6231
--- /dev/null
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -g -I../../../../usr/include/
+
+TEST_GEN_PROGS := pidfd_test
+
+include ../lib.mk
+
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
new file mode 100644
index 000000000000..104c75a33882
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/mount.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
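+/* glibc did not provide a wrapper for this syscall at the time of writing,
+ * so invoke it directly via syscall(2).
+ */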
+static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
+ unsigned int flags)
+{
+ return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
+}
+
+static int signal_received;
+
+static void set_signal_received_on_sigusr1(int sig)
+{
+ if (sig == SIGUSR1)
+ signal_received = 1;
+}
+
+/*
+ * A straightforward test of whether pidfd_send_signal() works is to send
+ * a signal to ourselves.
+ */
+static int test_pidfd_send_signal_simple_success(void)
+{
+ int pidfd, ret;
+ const char *test_name = "pidfd_send_signal send SIGUSR1";
+
+ pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ signal(SIGUSR1, set_signal_received_on_sigusr1);
+
+ ret = sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0);
+ close(pidfd);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ test_name);
+
+ if (signal_received != 1)
+ ksft_exit_fail_msg("%s test: Failed to receive signal\n",
+ test_name);
+
+ signal_received = 0;
+ ksft_test_result_pass("%s test: Sent signal\n", test_name);
+ return 0;
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (ret != pid)
+ goto again;
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static int test_pidfd_send_signal_exited_fail(void)
+{
+ int pidfd, ret, saved_errno;
+ char buf[256];
+ pid_t pid;
+ const char *test_name = "pidfd_send_signal signal exited process";
+
+ pid = fork();
+ if (pid < 0)
+ ksft_exit_fail_msg("%s test: Failed to create new process\n",
+ test_name);
+
+ if (pid == 0)
+ _exit(EXIT_SUCCESS);
+
+ snprintf(buf, sizeof(buf), "/proc/%d", pid);
+
+ pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);
+
+ (void)wait_for_pid(pid);
+
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
+ saved_errno = errno;
+ close(pidfd);
+ if (ret == 0)
+ ksft_exit_fail_msg(
+ "%s test: Managed to send signal to process even though it should have failed\n",
+ test_name);
+
+ if (saved_errno != ESRCH)
+ ksft_exit_fail_msg(
+ "%s test: Expected to receive ESRCH as errno value but received %d instead\n",
+ test_name, saved_errno);
+
+ ksft_test_result_pass("%s test: Failed to send signal as expected\n",
+ test_name);
+ return 0;
+}
+
+/*
+ * The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
+ * That means, when it wraps around any pid < 300 will be skipped.
+ * So we need to use a pid > 300 in order to test recycling.
+ */
+#define PID_RECYCLE 1000
+
+/*
+ * Maximum number of cycles we allow. This is equivalent to PID_MAX_DEFAULT.
+ * If users set a higher limit or we have cycled PIDFD_MAX_DEFAULT number of
+ * times then we skip the test to not go into an infinite loop or block for a
+ * long time.
+ */
+#define PIDFD_MAX_DEFAULT 0x8000
+
+/*
+ * Define a few custom error codes for the child process to clearly indicate
+ * what is happening. This way we can tell the difference between a system
+ * error, a test error, etc.
+ */
+#define PIDFD_PASS 0
+#define PIDFD_FAIL 1
+#define PIDFD_ERROR 2
+#define PIDFD_SKIP 3
+#define PIDFD_XFAIL 4
+
+static int test_pidfd_send_signal_recycled_pid_fail(void)
+{
+ int i, ret;
+ pid_t pid1;
+ const char *test_name = "pidfd_send_signal signal recycled pid";
+
+ ret = unshare(CLONE_NEWPID);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to unshare pid namespace\n",
+ test_name);
+
+ ret = unshare(CLONE_NEWNS);
+ if (ret < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to unshare mount namespace\n",
+ test_name);
+
+ ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s test: Failed to remount / private\n",
+ test_name);
+
+ /* pid 1 in new pid namespace */
+ pid1 = fork();
+ if (pid1 < 0)
+ ksft_exit_fail_msg("%s test: Failed to create new process\n",
+ test_name);
+
+ if (pid1 == 0) {
+ char buf[256];
+ pid_t pid2;
+ int pidfd = -1;
+
+ (void)umount2("/proc", MNT_DETACH);
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret < 0)
+ _exit(PIDFD_ERROR);
+
+ /* grab pid PID_RECYCLE */
+ for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
+ pid2 = fork();
+ if (pid2 < 0)
+ _exit(PIDFD_ERROR);
+
+ if (pid2 == 0)
+ _exit(PIDFD_PASS);
+
+ if (pid2 == PID_RECYCLE) {
+ snprintf(buf, sizeof(buf), "/proc/%d", pid2);
+ ksft_print_msg("pid to recycle is %d\n", pid2);
+ pidfd = open(buf, O_DIRECTORY | O_CLOEXEC);
+ }
+
+ if (wait_for_pid(pid2))
+ _exit(PIDFD_ERROR);
+
+ if (pid2 >= PID_RECYCLE)
+ break;
+ }
+
+ /*
+ * We want to be as predictable as we can, so if we haven't been
+ * able to grab pid PID_RECYCLE, skip the test.
+ */
+ if (pid2 != PID_RECYCLE) {
+ /* skip test */
+ close(pidfd);
+ _exit(PIDFD_SKIP);
+ }
+
+ if (pidfd < 0)
+ _exit(PIDFD_ERROR);
+
+ for (i = 0; i <= PIDFD_MAX_DEFAULT; i++) {
+ char c;
+ int pipe_fds[2];
+ pid_t recycled_pid;
+ int child_ret = PIDFD_PASS;
+
+ ret = pipe2(pipe_fds, O_CLOEXEC);
+ if (ret < 0)
+ _exit(PIDFD_ERROR);
+
+ recycled_pid = fork();
+ if (recycled_pid < 0)
+ _exit(PIDFD_ERROR);
+
+ if (recycled_pid == 0) {
+ close(pipe_fds[1]);
+ (void)read(pipe_fds[0], &c, 1);
+ close(pipe_fds[0]);
+
+ _exit(PIDFD_PASS);
+ }
+
+ /*
+ * Stop the child so we can inspect whether we have
+ * recycled pid PID_RECYCLE.
+ */
+ close(pipe_fds[0]);
+ ret = kill(recycled_pid, SIGSTOP);
+ close(pipe_fds[1]);
+ if (ret) {
+ (void)wait_for_pid(recycled_pid);
+ _exit(PIDFD_ERROR);
+ }
+
+ /*
+ * We have recycled the pid. Try to signal it. This
+ * needs to fail since this is a different process than
+ * the one the pidfd refers to.
+ */
+ if (recycled_pid == PID_RECYCLE) {
+ ret = sys_pidfd_send_signal(pidfd, SIGCONT,
+ NULL, 0);
+ if (ret && errno == ESRCH)
+ child_ret = PIDFD_XFAIL;
+ else
+ child_ret = PIDFD_FAIL;
+ }
+
+ /* let the process move on */
+ ret = kill(recycled_pid, SIGCONT);
+ if (ret)
+ (void)kill(recycled_pid, SIGKILL);
+
+ if (wait_for_pid(recycled_pid))
+ _exit(PIDFD_ERROR);
+
+ switch (child_ret) {
+ case PIDFD_FAIL:
+ /* fallthrough */
+ case PIDFD_XFAIL:
+ _exit(child_ret);
+ case PIDFD_PASS:
+ break;
+ default:
+ /* not reached */
+ _exit(PIDFD_ERROR);
+ }
+
+ /*
+ * If the user set a custom pid_max limit we could be
+ * in the millions.
+ * Skip the test in this case.
+ */
+ if (recycled_pid > PIDFD_MAX_DEFAULT)
+ _exit(PIDFD_SKIP);
+ }
+
+ /* failed to recycle pid */
+ _exit(PIDFD_SKIP);
+ }
+
+ ret = wait_for_pid(pid1);
+ switch (ret) {
+ case PIDFD_FAIL:
+ ksft_exit_fail_msg(
+ "%s test: Managed to signal recycled pid %d\n",
+ test_name, PID_RECYCLE);
+ case PIDFD_PASS:
+ ksft_exit_fail_msg("%s test: Failed to recycle pid %d\n",
+ test_name, PID_RECYCLE);
+ case PIDFD_SKIP:
+ ksft_print_msg("%s test: Skipping test\n", test_name);
+ ret = 0;
+ break;
+ case PIDFD_XFAIL:
+ ksft_test_result_pass(
+ "%s test: Failed to signal recycled pid as expected\n",
+ test_name);
+ ret = 0;
+ break;
+ default /* PIDFD_ERROR */:
+ ksft_exit_fail_msg("%s test: Error while running tests\n",
+ test_name);
+ }
+
+ return ret;
+}
+
+static int test_pidfd_send_signal_syscall_support(void)
+{
+ int pidfd, ret;
+ const char *test_name = "pidfd_send_signal check for support";
+
+ pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
+ if (pidfd < 0)
+ ksft_exit_fail_msg(
+ "%s test: Failed to open process file descriptor\n",
+ test_name);
+
+ ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
+ if (ret < 0) {
+ /*
+ * pidfd_send_signal() will currently return ENOSYS when
+ * CONFIG_PROC_FS is not set.
+ */
+ if (errno == ENOSYS)
+ ksft_exit_skip(
+ "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+ test_name);
+
+ ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ test_name);
+ }
+
+ close(pidfd);
+ ksft_test_result_pass(
+ "%s test: pidfd_send_signal() syscall is supported. Tests can be executed\n",
+ test_name);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_set_plan(4);
+
+ test_pidfd_send_signal_syscall_support();
+ test_pidfd_send_signal_simple_success();
+ test_pidfd_send_signal_exited_fail();
+ test_pidfd_send_signal_recycled_pid_fail();
+
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
index d056486f49de..93e9af37449d 100644
--- a/tools/testing/selftests/powerpc/alignment/Makefile
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
TEST_GEN_PROGS := copy_first_unaligned alignment_handler
top_srcdir = ../../../../..
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 169a8b9719fb..0453c50c949c 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test the powerpc alignment handler on POWER8/POWER9
*
* Copyright (C) 2017 IBM Corporation (Michael Neuling, Andrew Donnellan)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
/*
diff --git a/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c b/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c
index 5a9589987702..db4e8c680500 100644
--- a/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c
+++ b/tools/testing/selftests/powerpc/alignment/copy_first_unaligned.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Chris Smart, IBM Corporation.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Calls to copy_first which are not 128-byte aligned should be
* caught and sent a SIGBUS.
- *
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index 87f1f0252299..a2e8c9da7fa5 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Context switch microbenchmark.
*
* Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/benchmarks/futex_bench.c b/tools/testing/selftests/powerpc/benchmarks/futex_bench.c
index d58e4dc50fcd..017057090490 100644
--- a/tools/testing/selftests/powerpc/benchmarks/futex_bench.c
+++ b/tools/testing/selftests/powerpc/benchmarks/futex_bench.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
index 3af3c21e8036..6b415683357b 100644
--- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
+++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Anton Blanchard, IBM Corp.
- * Licensed under GPLv2.
*/
#include <sys/time.h>
diff --git a/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
index 033de0560d99..2525adf64342 100644
--- a/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
+++ b/tools/testing/selftests/powerpc/benchmarks/mmap_bench.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Anton Blanchard, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
index ecc14d68e101..579f0215c6e7 100644
--- a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test null syscall performance
*
* Copyright (C) 2009-2015 Anton Blanchard, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define NR_LOOPS 10000000
@@ -25,7 +21,7 @@ unsigned long long clock_frequency;
unsigned long long timebase_frequency;
double timebase_multiplier;
-static inline unsigned long long mftb(void)
+static inline unsigned long mftb(void)
{
unsigned long low;
diff --git a/tools/testing/selftests/powerpc/cache_shape/cache_shape.c b/tools/testing/selftests/powerpc/cache_shape/cache_shape.c
index 29ec07eba7f9..171b6c9480eb 100644
--- a/tools/testing/selftests/powerpc/cache_shape/cache_shape.c
+++ b/tools/testing/selftests/powerpc/cache_shape/cache_shape.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2017, Michael Ellerman, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <elf.h>
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/export.h b/tools/testing/selftests/powerpc/copyloops/asm/export.h
index 0bab35f6777a..05c1663c89b0 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/export.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/export.h
@@ -1,2 +1,3 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define EXPORT_SYMBOL(x)
+#define EXPORT_SYMBOL_KASAN(x)
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/kasan.h b/tools/testing/selftests/powerpc/copyloops/asm/kasan.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/asm/kasan.h
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
index 0605df807593..58c1cef3e399 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
@@ -25,6 +25,7 @@
#define _GLOBAL(A) FUNC_START(test_ ## A)
#define _GLOBAL_TOC(A) _GLOBAL(A)
+#define _GLOBAL_TOC_KASAN(A) _GLOBAL(A)
#define PPC_MTOCRF(A, B) mtocrf A, B
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index cdb840bc54f2..13e9b9e28e2c 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* POWER Data Stream Control Register (DSCR)
*
@@ -6,10 +7,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#ifndef _SELFTESTS_POWERPC_DSCR_DSCR_H
#define _SELFTESTS_POWERPC_DSCR_DSCR_H
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index 9e1a37e93b63..288a4e2ad156 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) default test
*
@@ -7,10 +8,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
index ad9c3ec26048..aefcd8d8759b 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) explicit test
*
@@ -13,10 +14,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
index c8c240accc0c..7c1cb46397c6 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork exec test
*
@@ -12,10 +13,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
index 3e5a6d195e9a..04297a69ab59 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) fork test
*
@@ -13,10 +14,6 @@
*
* Copyright 2012, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
index 1899bd85121f..02f6b4efde14 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs interface test
*
@@ -6,10 +7,6 @@
* well verified from their sysfs interfaces.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
index ad97b592eccc..37be2c25f277 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) sysfs thread test
*
@@ -7,10 +8,6 @@
* executing on individual CPUs on the system.
*
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#define _GNU_SOURCE
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_user_test.c b/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
index 77d16b5e7dca..eaf785d11eed 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_user_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER Data Stream Control Register (DSCR) SPR test
*
@@ -14,10 +15,6 @@
*
* Copyright 2013, Anton Blanchard, IBM Corporation.
* Copyright 2015, Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include "dscr.h"
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 9d7166dfad1e..0ad4f12b3d43 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <errno.h>
@@ -21,6 +21,7 @@
#define KILL_TIMEOUT 5
+/* Setting timeout to -1 disables the alarm */
static uint64_t timeout = 120;
int run_test(int (test_function)(void), char *name)
@@ -43,8 +44,9 @@ int run_test(int (test_function)(void), char *name)
setpgid(pid, pid);
- /* Wake us up in timeout seconds */
- alarm(timeout);
+ if (timeout != -1)
+ /* Wake us up in timeout seconds */
+ alarm(timeout);
terminated = false;
wait:
diff --git a/tools/testing/selftests/powerpc/include/fpu_asm.h b/tools/testing/selftests/powerpc/include/fpu_asm.h
index 6a387d255e27..58ac2ce33505 100644
--- a/tools/testing/selftests/powerpc/include/fpu_asm.h
+++ b/tools/testing/selftests/powerpc/include/fpu_asm.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2016, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _SELFTESTS_POWERPC_FPU_ASM_H
diff --git a/tools/testing/selftests/powerpc/include/gpr_asm.h b/tools/testing/selftests/powerpc/include/gpr_asm.h
index f6f38852d3a0..5db74f5c6131 100644
--- a/tools/testing/selftests/powerpc/include/gpr_asm.h
+++ b/tools/testing/selftests/powerpc/include/gpr_asm.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2016, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#ifndef _SELFTESTS_POWERPC_GPR_ASM_H
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
index 52b4710469d2..022c5076b2c5 100644
--- a/tools/testing/selftests/powerpc/include/reg.h
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_REG_H
@@ -77,6 +77,16 @@
#define TEXASR_TE 0x0000000004000000
#define TEXASR_ROT 0x0000000002000000
+/* MSR register bits */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Active */
+
+#define __MASK(X) (1UL<<(X))
+
+/* macro to check TM MSR bits */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+
/* Vector Instructions */
#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
((rb) << 11) | (((xs) >> 5)))
diff --git a/tools/testing/selftests/powerpc/include/subunit.h b/tools/testing/selftests/powerpc/include/subunit.h
index 9c6c4e901ab6..068d55fdf80f 100644
--- a/tools/testing/selftests/powerpc/include/subunit.h
+++ b/tools/testing/selftests/powerpc/include/subunit.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_SUBUNIT_H
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index ae43a614835d..0e2b2e6284ac 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_UTILS_H
@@ -102,8 +102,10 @@ do { \
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
#elif defined(__powerpc__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_MSR]
#else
#error implement UCONTEXT_NIA
#endif
diff --git a/tools/testing/selftests/powerpc/include/vmx_asm.h b/tools/testing/selftests/powerpc/include/vmx_asm.h
index 2eaaeca9cf1d..ad9fb1b4069d 100644
--- a/tools/testing/selftests/powerpc/include/vmx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vmx_asm.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index 54064ced9e95..434ca2f9bfae 100644
--- a/tools/testing/selftests/powerpc/include/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S
index 0dc44f0da065..9304ea7d59b9 100644
--- a/tools/testing/selftests/powerpc/lib/reg.S
+++ b/tools/testing/selftests/powerpc/lib/reg.S
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* test helper assembly functions
*
* Copyright (C) 2016 Simon Guo, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <ppc-asm.h>
#include "reg.h"
diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S
index 8a04bb117b69..9dc0c158f871 100644
--- a/tools/testing/selftests/powerpc/math/fpu_asm.S
+++ b/tools/testing/selftests/powerpc/math/fpu_asm.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c
index 0f85b79d883d..5235bdc8c0b1 100644
--- a/tools/testing/selftests/powerpc/math/fpu_preempt.c
+++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the FPU registers change across preemption.
* Two things should be noted here a) The check_fpu function in asm only checks
* the non volatile registers as it is reused from the syscall test b) There is
diff --git a/tools/testing/selftests/powerpc/math/fpu_signal.c b/tools/testing/selftests/powerpc/math/fpu_signal.c
index 888aa51b4204..7b1addd50420 100644
--- a/tools/testing/selftests/powerpc/math/fpu_signal.c
+++ b/tools/testing/selftests/powerpc/math/fpu_signal.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the FPU registers are correctly reported in a
* signal context. Each worker just spins checking its FPU registers, at some
* point a signal will interrupt it and C code will check the signal context
diff --git a/tools/testing/selftests/powerpc/math/fpu_syscall.c b/tools/testing/selftests/powerpc/math/fpu_syscall.c
index 949e6721256d..694f225c7e45 100644
--- a/tools/testing/selftests/powerpc/math/fpu_syscall.c
+++ b/tools/testing/selftests/powerpc/math/fpu_syscall.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the FPU registers change across a syscall (fork).
*/
diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S
index cb1e5ae1be99..11b0704c597d 100644
--- a/tools/testing/selftests/powerpc/math/vmx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vmx_asm.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c
index 9ef376c55b13..2e059f154e77 100644
--- a/tools/testing/selftests/powerpc/math/vmx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the VMX registers change across preemption.
* Two things should be noted here a) The check_vmx function in asm only checks
* the non volatile registers as it is reused from the syscall test b) There is
diff --git a/tools/testing/selftests/powerpc/math/vmx_signal.c b/tools/testing/selftests/powerpc/math/vmx_signal.c
index 671d7533a557..785a48e0976f 100644
--- a/tools/testing/selftests/powerpc/math/vmx_signal.c
+++ b/tools/testing/selftests/powerpc/math/vmx_signal.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the VMX registers are correctly reported in a
* signal context. Each worker just spins checking its VMX registers, at some
* point a signal will interrupt it and C code will check the signal context
diff --git a/tools/testing/selftests/powerpc/math/vmx_syscall.c b/tools/testing/selftests/powerpc/math/vmx_syscall.c
index a017918ee1ca..9ee293cc868e 100644
--- a/tools/testing/selftests/powerpc/math/vmx_syscall.c
+++ b/tools/testing/selftests/powerpc/math/vmx_syscall.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the VMX registers change across a syscall (fork).
*/
diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S
index 8f431f6abc49..ffc165d984cc 100644
--- a/tools/testing/selftests/powerpc/math/vsx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vsx_asm.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c
index 6387f03a0a6a..63de9c6e2cd3 100644
--- a/tools/testing/selftests/powerpc/math/vsx_preempt.c
+++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test attempts to see if the VSX registers change across preemption.
* There is no way to be sure preemption happened so this test just
* uses many threads and a long wait. As such, a successful test
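[Editor's note] The preempt tests touched above (fpu_preempt, vmx_preempt, vsx_preempt) all rely on the same strategy: there is no portable way to force a preemption, so each test spins many workers for a long time and assumes the scheduler will preempt them at some point. A minimal standalone sketch of that pattern follows; check_state() is a hypothetical stand-in for the asm helpers (check_fpu/check_vmx/check_vsx) and the sketch is not part of the patch.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define NTHREADS    64
#define RUN_SECONDS 5

/* Hypothetical stand-in for check_fpu()/check_vmx()/check_vsx():
 * returns 0 while the register state still matches what was loaded. */
static int check_state(void)
{
	return 0;
}

static volatile int failed;

static void *worker(void *unused)
{
	time_t end = time(NULL) + RUN_SECONDS;

	(void)unused;
	/* Spin long enough that the scheduler almost certainly preempts us. */
	while (time(NULL) < end) {
		if (check_state()) {
			failed = 1;
			break;
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);

	printf(failed ? "state changed across preemption\n"
		      : "state preserved across preemption\n");
	return failed;
}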
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index ba919308fe30..d503b8764a8e 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -3,4 +3,5 @@ subpage_prot
tempfile
prot_sao
segv_errors
-wild_bctr
\ No newline at end of file
+wild_bctr
+large_vm_fork_separation
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 43d68420e363..f1fbc15800c4 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,7 +2,8 @@
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
+ large_vm_fork_separation
TEST_GEN_FILES := tempfile
top_srcdir = ../../../../..
@@ -13,6 +14,7 @@ $(TEST_GEN_PROGS): ../harness.c
$(OUTPUT)/prot_sao: ../utils.c
$(OUTPUT)/wild_bctr: CFLAGS += -m64
+$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
$(OUTPUT)/tempfile:
dd if=/dev/zero of=$@ bs=64k count=1
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c
new file mode 100644
index 000000000000..2363a7f3ab0d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019, Michael Ellerman, IBM Corp.
+//
+// Test that allocating memory beyond the memory limit and then forking is
+// handled correctly, i.e. the child is able to access the mappings beyond the
+// memory limit and the child's writes are not visible to the parent.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
+#endif
+
+
+static int test(void)
+{
+ int p2c[2], c2p[2], rc, status, c, *p;
+ unsigned long page_size;
+ pid_t pid;
+
+ page_size = sysconf(_SC_PAGESIZE);
+ SKIP_IF(page_size != 65536);
+
+ // Create a mapping at 512TB to allocate an extended_id
+ p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ printf("Error: couldn't mmap(), confirm kernel has 4TB support?\n");
+ return 1;
+ }
+
+ printf("parent writing %p = 1\n", p);
+ *p = 1;
+
+ FAIL_IF(pipe(p2c) == -1 || pipe(c2p) == -1);
+
+ pid = fork();
+ if (pid == 0) {
+ FAIL_IF(read(p2c[0], &c, 1) != 1);
+
+ pid = getpid();
+ printf("child writing %p = %d\n", p, pid);
+ *p = pid;
+
+ FAIL_IF(write(c2p[1], &c, 1) != 1);
+ FAIL_IF(read(p2c[0], &c, 1) != 1);
+ exit(0);
+ }
+
+ c = 0;
+ FAIL_IF(write(p2c[1], &c, 1) != 1);
+ FAIL_IF(read(c2p[0], &c, 1) != 1);
+
+ // Prevent compiler optimisation
+ barrier();
+
+ rc = 0;
+ printf("parent reading %p = %d\n", p, *p);
+ if (*p != 1) {
+ printf("Error: BUG! parent saw child's write! *p = %d\n", *p);
+ rc = 1;
+ }
+
+ FAIL_IF(write(p2c[1], &c, 1) != 1);
+ FAIL_IF(waitpid(pid, &status, 0) == -1);
+ FAIL_IF(!WIFEXITED(status) || WEXITSTATUS(status));
+
+ if (rc == 0)
+ printf("success: test completed OK\n");
+
+ return rc;
+}
+
+int main(void)
+{
+ return test_harness(test, "large_vm_fork_separation");
+}
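[Editor's note] On the MAP_FIXED_NOREPLACE fallback in the new test: on headers that predate the flag it falls back to plain MAP_FIXED, which is only acceptable here because nothing else is expected to live above 512TB. On kernels that do not understand the flag, mmap() treats the address purely as a hint, so a caller that cares should also verify where the mapping actually landed. A minimal sketch of that check (not part of the patch; the address and length are illustrative):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE MAP_FIXED	/* fallback for old headers, as in the test */
#endif

int main(void)
{
	void *hint = (void *)(512ul << 40);	/* just above the 512TB boundary */
	void *p = mmap(hint, 64 * 1024, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* e.g. EEXIST if something is already mapped there */
		return 1;
	}
	if (p != hint) {
		/* An old kernel ignored the unknown flag and placed us elsewhere. */
		fprintf(stderr, "mapping landed at %p, wanted %p\n", p, hint);
		return 1;
	}
	printf("mapping created at %p\n", p);
	return 0;
}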
diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c
index 611530d43fa9..e2eed65b7735 100644
--- a/tools/testing/selftests/powerpc/mm/prot_sao.c
+++ b/tools/testing/selftests/powerpc/mm/prot_sao.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2016, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/count_instructions.c b/tools/testing/selftests/powerpc/pmu/count_instructions.c
index 4622117b24c0..a3984ef1e96a 100644
--- a/tools/testing/selftests/powerpc/pmu/count_instructions.c
+++ b/tools/testing/selftests/powerpc/pmu/count_instructions.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
index 94110b1dcd3d..a2d7b0e3dca9 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdbool.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S
index c7e4093f1cd3..4866a3a76d22 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S
+++ b/tools/testing/selftests/powerpc/pmu/ebb/busy_loop.S
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <ppc-asm.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c
index ac18cf617dd6..ca9aeb0d8272 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/close_clears_pmcc_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
index f0632e7fdf29..3cd33eb51e5e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
index 33e56a2342e5..8466ef9d7de8 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
index 7c57a8d79535..bc893813483e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
index ecf5ee3283a3..dcd351d20328 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
index c0faba520b35..94c99c12c0f2 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
index 46681fec549b..dfbc5c3ad52d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
index f87e761f82d0..b5bc2b616075 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_PMU_EBB_EBB_H
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
index 14274ea206e5..c170398de91a 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_handler.S
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <ppc-asm.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c
index 1e7b7fe2396b..8980f054d8d9 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_child_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
index a991d2ea8d0a..ca2f7d729155 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
index af20a2b363aa..4d822cb3589c 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c
index 7762ab26e5ac..6e6dd0bce1f9 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/event_attributes_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
index b866a0581d32..08a7b5f133b9 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
+++ b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <ppc-asm.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
index 167135bd92a8..2b25b55452d9 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/fork_cleanup_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
@@ -11,7 +11,6 @@
#include <sys/wait.h>
#include <unistd.h>
#include <setjmp.h>
-#include <signal.h>
#include "ebb.h"
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
index 35a3426e341c..eed338b18e11 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
index 2ed7ad33f7a3..ac3e6e182614 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <sched.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
index 6ff8c8ff27d6..b8242e9d97d2 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
index 037cb6154f36..a05c0e18ded6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdbool.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
index 8341d7778d5e..fc5bf4870d8e 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
index c5fa64790c22..153ebc92234f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <sched.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index 30e1ac62e8cb..eadad75ed7e6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c
index f923228bca22..bd1ace9a055d 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/reg_access_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c
index 1846f4e84635..0aa2aefd36d4 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_pinned_vs_ebb_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c
index e3bc6e92a6a5..3e9d95ad9dfe 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/task_event_vs_ebb_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <signal.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.c b/tools/testing/selftests/powerpc/pmu/ebb/trace.c
index 251e66ab2aa7..0c59f66a6fb2 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/trace.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <errno.h>
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
index 926458e28c8b..7c0fb5d2bdb1 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/trace.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_PMU_EBB_TRACE_H
diff --git a/tools/testing/selftests/powerpc/pmu/event.c b/tools/testing/selftests/powerpc/pmu/event.c
index 184b36807d48..48e3a413b15d 100644
--- a/tools/testing/selftests/powerpc/pmu/event.c
+++ b/tools/testing/selftests/powerpc/pmu/event.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/pmu/event.h b/tools/testing/selftests/powerpc/pmu/event.h
index a0ea6b1eef73..302eaab51706 100644
--- a/tools/testing/selftests/powerpc/pmu/event.h
+++ b/tools/testing/selftests/powerpc/pmu/event.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_PMU_EVENT_H
diff --git a/tools/testing/selftests/powerpc/pmu/l3_bank_test.c b/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
index 77472f31441e..a96d512a18c4 100644
--- a/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
+++ b/tools/testing/selftests/powerpc/pmu/l3_bank_test.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index 5bf5dd40822b..88690b97b7b9 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
diff --git a/tools/testing/selftests/powerpc/pmu/lib.h b/tools/testing/selftests/powerpc/pmu/lib.h
index 0213af4ff332..fa12e7d0b4d3 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.h
+++ b/tools/testing/selftests/powerpc/pmu/lib.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef __SELFTESTS_POWERPC_PMU_LIB_H
diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S
index 20c1f0876c47..8cc9b5e2c9de 100644
--- a/tools/testing/selftests/powerpc/pmu/loop.S
+++ b/tools/testing/selftests/powerpc/pmu/loop.S
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2013, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#include <ppc-asm.h>
diff --git a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
index fddbbc9cae2f..2756fe2efdc5 100644
--- a/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
+++ b/tools/testing/selftests/powerpc/pmu/per_event_excludes.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
index ea2b7bd09e36..9b9491a63213 100644
--- a/tools/testing/selftests/powerpc/primitives/Makefile
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -I$(CURDIR)
TEST_GEN_PROGS := load_unaligned_zeropad
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
index ee1e9ca22f0d..1439c8c7ff38 100644
--- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Userspace test harness for load_unaligned_zeropad. Creates two
* pages and uses mprotect to prevent access to the second page and
@@ -8,11 +9,6 @@
* performed while access to the second page is enabled via mprotect.
*
* Copyright (C) 2014 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <stdlib.h>
diff --git a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
index 60df0b5e628a..200337daec42 100644
--- a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* perf events self profiling example test case for hw breakpoints.
*
@@ -14,11 +15,6 @@
* http://ozlabs.org/~anton/junkcode/perf_events_example1.c
*
* Copyright (C) 2018 Michael Neuling, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <unistd.h>
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
index ca29fafeed5d..17cd480c8780 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
index e30fef63824c..c5cd53181e2e 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define GPR_1 1
#define GPR_2 2
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
index f9b5069db89b..58cb1a860cc9 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "ptrace-tar.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
index aed0aac716d2..d6a4c0aab73d 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define TAR_1 10
#define TAR_2 20
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
index a08a91594dbe..82f7bdc2e5e6 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers in TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
index dbdffa2e2c82..ad65be6e8e85 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for GPR/FPR registers in TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "ptrace-gpr.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
index f47174746231..25e23e73c72e 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "tm.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
index 18a685bf6a09..f603fe5a445b 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers in the TM Suspend context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "tm.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
index ba04999254e3..068bfed2e606 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test TM SPR registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "tm.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
index f70023b25e6e..e0d37f07bdeb 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for TAR, PPR, DSCR registers in the TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "tm.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
index dfba80058977..8027457b97b7 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers in the TM context
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "tm.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
index 04084ee7d27b..c4fe0e893306 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ptrace test for VMX/VSX registers
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "ptrace.h"
#include "ptrace-vsx.h"
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
index f4e4b427c9d9..6633485210b6 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define VEC_MAX 128
#define VSX_MAX 32
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h
index 34201cfa8335..5181ad9b4b6c 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace.h
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Ptrace interface test helper functions
*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <inttypes.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/powerpc/scripts/hmi.sh b/tools/testing/selftests/powerpc/scripts/hmi.sh
index 83fb253ae3bd..dcdb392e8427 100755
--- a/tools/testing/selftests/powerpc/scripts/hmi.sh
+++ b/tools/testing/selftests/powerpc/scripts/hmi.sh
@@ -1,15 +1,8 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright 2015, Daniel Axtens, IBM Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
# do we have ./getscom, ./putscom?
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
index 1b89224a8aab..dca5852a1546 100644
--- a/tools/testing/selftests/powerpc/signal/.gitignore
+++ b/tools/testing/selftests/powerpc/signal/.gitignore
@@ -1,2 +1,3 @@
signal
signal_tm
+sigfuz
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index 209a958dca12..113838fbbe7f 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := signal signal_tm
+TEST_GEN_PROGS := signal signal_tm sigfuz
CFLAGS += -maltivec
$(OUTPUT)/signal_tm: CFLAGS += -mhtm
+$(OUTPUT)/sigfuz: CFLAGS += -pthread -m64
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/signal/sigfuz.c b/tools/testing/selftests/powerpc/signal/sigfuz.c
new file mode 100644
index 000000000000..dade00c698c2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sigfuz.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018, Breno Leitao, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Sigfuz(tm): A PowerPC TM-aware signal fuzzer.
+ *
+ * This is a new selftest that raises SIGUSR1 signals and handles them in a
+ * set of different ways, trying to create different scenarios for testing
+ * purposes.
+ *
+ * This test works by raising a signal and calling sigreturn interleaved with
+ * TM operations, such as starting, suspending and terminating a transaction.
+ * The test depends on random numbers, and, based on them, it sets different
+ * TM states.
+ *
+ * Other than that, the test fills out the user context struct that is passed
+ * to the sigreturn system call with random data, in order to make sure that
+ * the sigreturn syscall can handle different and invalid states properly.
+ *
+ * This selftest has command line parameters to control what kind of tests the
+ * user wants to run, for example, whether a transaction should be started
+ * prior to the signal being raised, or after the signal is raised and before
+ * the sigreturn. If no parameter is given, the default is to enable all
+ * options.
+ *
+ * This test does not check if the user context is being read and set
+ * properly by the kernel. Its purpose, at this time, is basically
+ * guaranteeing that the kernel does not crash on invalid scenarios.
+ */
+
+#include <stdio.h>
+#include <limits.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include "utils.h"
+
+/* Selftest defaults */
+#define COUNT_MAX 4000 /* Number of iterations */
+#define THREADS 16 /* Number of threads */
+
+/* Arguments options */
+#define ARG_MESS_WITH_TM_AT 0x1
+#define ARG_MESS_WITH_TM_BEFORE 0x2
+#define ARG_MESS_WITH_MSR_AT 0x4
+#define ARG_FOREVER 0x10
+#define ARG_COMPLETE (ARG_MESS_WITH_TM_AT | \
+ ARG_MESS_WITH_TM_BEFORE | \
+ ARG_MESS_WITH_MSR_AT)
+
+static int args;
+static int nthread = THREADS;
+static int count_max = COUNT_MAX;
+
+/* checkpoint context */
+static ucontext_t *tmp_uc;
+
+/* Return true with 1/x probability */
+static int one_in_chance(int x)
+{
+ return rand() % x == 0;
+}
+
+/* Change TM states */
+static void mess_with_tm(void)
+{
+ /* Starts a transaction 33% of the time */
+ if (one_in_chance(3)) {
+ asm ("tbegin. ;"
+ "beq 8 ;");
+
+ /* And suspend half of them */
+ if (one_in_chance(2))
+ asm("tsuspend. ;");
+ }
+
+ /* Call 'tend' in 5% of the runs */
+ if (one_in_chance(20))
+ asm("tend. ;");
+}
+
+/* Signal handler that will be invoked with raise() */
+static void trap_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+
+ ucp->uc_link = tmp_uc;
+
+ /*
+ * Set uc_link in three possible ways:
+ * - Setting a single 'int' in the whole chunk
+ * - Cloning ucp into uc_link
+ * - Allocating a new memory chunk
+ */
+ if (one_in_chance(3)) {
+ memset(ucp->uc_link, rand(), sizeof(ucontext_t));
+ } else if (one_in_chance(2)) {
+ memcpy(ucp->uc_link, uc, sizeof(ucontext_t));
+ } else if (one_in_chance(2)) {
+ if (tmp_uc) {
+ free(tmp_uc);
+ tmp_uc = NULL;
+ }
+ tmp_uc = malloc(sizeof(ucontext_t));
+ ucp->uc_link = tmp_uc;
+ /* Trying to cause a major page fault at Kernel level */
+ madvise(ucp->uc_link, sizeof(ucontext_t), MADV_DONTNEED);
+ }
+
+ if (args & ARG_MESS_WITH_MSR_AT) {
+ /* Changing the checkpointed registers */
+ if (one_in_chance(4)) {
+ ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |= MSR_TS_S;
+ } else {
+ if (one_in_chance(2)) {
+ ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |=
+ MSR_TS_T;
+ } else if (one_in_chance(2)) {
+ ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] |=
+ MSR_TS_T | MSR_TS_S;
+ }
+ }
+
+ /* Checking the current register context */
+ if (one_in_chance(2)) {
+ ucp->uc_mcontext.gp_regs[PT_MSR] |= MSR_TS_S;
+ } else if (one_in_chance(2)) {
+ if (one_in_chance(2))
+ ucp->uc_mcontext.gp_regs[PT_MSR] |=
+ MSR_TS_T;
+ else if (one_in_chance(2))
+ ucp->uc_mcontext.gp_regs[PT_MSR] |=
+ MSR_TS_T | MSR_TS_S;
+ }
+ }
+
+ if (one_in_chance(20)) {
+ /* Nested transaction start */
+ if (one_in_chance(5))
+ mess_with_tm();
+
+ /* Return without changing any other context info */
+ return;
+ }
+
+ if (one_in_chance(10))
+ ucp->uc_mcontext.gp_regs[PT_MSR] = random();
+ if (one_in_chance(10))
+ ucp->uc_mcontext.gp_regs[PT_NIP] = random();
+ if (one_in_chance(10))
+ ucp->uc_link->uc_mcontext.gp_regs[PT_MSR] = random();
+ if (one_in_chance(10))
+ ucp->uc_link->uc_mcontext.gp_regs[PT_NIP] = random();
+
+ ucp->uc_mcontext.gp_regs[PT_TRAP] = random();
+ ucp->uc_mcontext.gp_regs[PT_DSISR] = random();
+ ucp->uc_mcontext.gp_regs[PT_DAR] = random();
+ ucp->uc_mcontext.gp_regs[PT_ORIG_R3] = random();
+ ucp->uc_mcontext.gp_regs[PT_XER] = random();
+ ucp->uc_mcontext.gp_regs[PT_RESULT] = random();
+ ucp->uc_mcontext.gp_regs[PT_SOFTE] = random();
+ ucp->uc_mcontext.gp_regs[PT_DSCR] = random();
+ ucp->uc_mcontext.gp_regs[PT_CTR] = random();
+ ucp->uc_mcontext.gp_regs[PT_LNK] = random();
+ ucp->uc_mcontext.gp_regs[PT_CCR] = random();
+ ucp->uc_mcontext.gp_regs[PT_REGS_COUNT] = random();
+
+ ucp->uc_link->uc_mcontext.gp_regs[PT_TRAP] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_DSISR] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_DAR] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_ORIG_R3] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_XER] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_RESULT] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_SOFTE] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_DSCR] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_CTR] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_LNK] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_CCR] = random();
+ ucp->uc_link->uc_mcontext.gp_regs[PT_REGS_COUNT] = random();
+
+ if (args & ARG_MESS_WITH_TM_BEFORE) {
+ if (one_in_chance(2))
+ mess_with_tm();
+ }
+}
+
+static void seg_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ /* Cleanly exit the process that segfaults */
+ exit(0);
+}
+
+static void *sigfuz_test(void *thrid)
+{
+ struct sigaction trap_sa, seg_sa;
+ int ret, i = 0;
+ pid_t t;
+
+ tmp_uc = malloc(sizeof(ucontext_t));
+
+ /* Main signal handler */
+ trap_sa.sa_flags = SA_SIGINFO;
+ trap_sa.sa_sigaction = trap_signal_handler;
+
+ /* SIGSEGV signal handler */
+ seg_sa.sa_flags = SA_SIGINFO;
+ seg_sa.sa_sigaction = seg_signal_handler;
+
+ /* The signal handler will enable MSR_TS */
+ sigaction(SIGUSR1, &trap_sa, NULL);
+
+ /* If the kernel does not crash, the process may segfault; handle it cleanly so the test can continue */
+ sigaction(SIGSEGV, &seg_sa, NULL);
+
+ while (i < count_max) {
+ t = fork();
+
+ if (t == 0) {
+ /* One seed per process */
+ srand(time(NULL) + getpid());
+ if (args & ARG_MESS_WITH_TM_AT) {
+ if (one_in_chance(2))
+ mess_with_tm();
+ }
+ raise(SIGUSR1);
+ exit(0);
+ } else {
+ waitpid(t, &ret, 0);
+ }
+ if (!(args & ARG_FOREVER))
+ i++;
+ }
+
+ /* If not freed already, free now */
+ if (tmp_uc) {
+ free(tmp_uc);
+ tmp_uc = NULL;
+ }
+
+ return NULL;
+}
+
+static int signal_fuzzer(void)
+{
+ int t, rc;
+ pthread_t *threads;
+
+ threads = malloc(nthread * sizeof(pthread_t));
+
+ for (t = 0; t < nthread; t++) {
+ rc = pthread_create(&threads[t], NULL, sigfuz_test,
+ (void *)&t);
+ if (rc)
+ perror("Thread creation error\n");
+ }
+
+ for (t = 0; t < nthread; t++) {
+ rc = pthread_join(threads[t], NULL);
+ if (rc)
+ perror("Thread join error\n");
+ }
+
+ free(threads);
+
+ return EXIT_SUCCESS;
+}
+
+static void show_help(char *name)
+{
+ printf("%s: Sigfuzzer for powerpc\n", name);
+ printf("Usage:\n");
+ printf("\t-b\t Mess with TM before raising a SIGUSR1 signal\n");
+ printf("\t-a\t Mess with TM after raising a SIGUSR1 signal\n");
+ printf("\t-m\t Mess with MSR[TS] bits at mcontext\n");
+ printf("\t-x\t Mess with everything above\n");
+ printf("\t-f\t Run forever (Press ^C to Quit)\n");
+ printf("\t-i\t Number of iterations. (Default = %d)\n", COUNT_MAX);
+ printf("\t-t\t Number of threads. (Default = %d)\n", THREADS);
+ exit(-1);
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "bamxt:fi:h")) != -1) {
+ if (opt == 'b') {
+ printf("Mess with TM before signal\n");
+ args |= ARG_MESS_WITH_TM_BEFORE;
+ } else if (opt == 'a') {
+ printf("Mess with TM at signal handler\n");
+ args |= ARG_MESS_WITH_TM_AT;
+ } else if (opt == 'm') {
+ printf("Mess with MSR[TS] bits in mcontext\n");
+ args |= ARG_MESS_WITH_MSR_AT;
+ } else if (opt == 'x') {
+ printf("Running with all options enabled\n");
+ args |= ARG_COMPLETE;
+ } else if (opt == 't') {
+ nthread = atoi(optarg);
+ printf("Threads = %d\n", nthread);
+ } else if (opt == 'f') {
+ args |= ARG_FOREVER;
+ printf("Press ^C to stop\n");
+ test_harness_set_timeout(-1);
+ } else if (opt == 'i') {
+ count_max = atoi(optarg);
+ printf("Running for %d iterations\n", count_max);
+ } else if (opt == 'h') {
+ show_help(argv[0]);
+ }
+ }
+
+ /* Default test suite */
+ if (!args)
+ args = ARG_COMPLETE;
+
+ test_harness(signal_fuzzer, "signal_fuzzer");
+}
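[Editor's note] For reference on the probabilities used in mess_with_tm() above: one_in_chance(x) returns true with probability 1/x, so roughly a third of calls issue tbegin., about half of those (one sixth overall) are then suspended, and 5% of calls issue tend. A small standalone sketch (not part of the patch) that reuses the helper and measures those frequencies empirically:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Same helper as in sigfuz.c: true with probability 1/x */
static int one_in_chance(int x)
{
	return rand() % x == 0;
}

int main(void)
{
	int begins = 0, suspends = 0, ends = 0, i, n = 1000000;

	srand(time(NULL));
	for (i = 0; i < n; i++) {
		if (one_in_chance(3)) {
			begins++;
			if (one_in_chance(2))
				suspends++;
		}
		if (one_in_chance(20))
			ends++;
	}
	printf("tbegin: %.1f%%  tsuspend: %.1f%%  tend: %.1f%%\n",
	       100.0 * begins / n, 100.0 * suspends / n, 100.0 * ends / n);
	return 0;
}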
diff --git a/tools/testing/selftests/powerpc/signal/signal.S b/tools/testing/selftests/powerpc/signal/signal.S
index 322f2f1fc327..228fba49935d 100644
--- a/tools/testing/selftests/powerpc/signal/signal.S
+++ b/tools/testing/selftests/powerpc/signal/signal.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/signal/signal.c b/tools/testing/selftests/powerpc/signal/signal.c
index e7dedd28b3c2..766e484d984b 100644
--- a/tools/testing/selftests/powerpc/signal/signal.c
+++ b/tools/testing/selftests/powerpc/signal/signal.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Sending one self a signal should always get delivered.
*/
diff --git a/tools/testing/selftests/powerpc/signal/signal_tm.c b/tools/testing/selftests/powerpc/signal/signal_tm.c
index 2e7451a37cc6..5bf2224ef7f2 100644
--- a/tools/testing/selftests/powerpc/signal/signal_tm.c
+++ b/tools/testing/selftests/powerpc/signal/signal_tm.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Sending one self a signal should always get delivered.
*/
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h b/tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h
index 9de413c0c2cb..3edd1a1d9128 100644
--- a/tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h
+++ b/tools/testing/selftests/powerpc/stringloops/asm/ppc-opcode.h
@@ -1,11 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2009 Freescale Semiconductor, Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* provides masks and opcode images for use by code generation, emulation
* and for instructions that older assemblers might not know about
*/
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
index 161b8846336f..01b22775ca87 100644
--- a/tools/testing/selftests/powerpc/syscalls/Makefile
+++ b/tools/testing/selftests/powerpc/syscalls/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
TEST_GEN_PROGS := ipc_unmuxed
CFLAGS += -I../../../../../usr/include
diff --git a/tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c b/tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c
index 2ac02706f8c8..4c582524aeb3 100644
--- a/tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c
+++ b/tools/testing/selftests/powerpc/syscalls/ipc_unmuxed.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2015, Michael Ellerman, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* This test simply tests that certain syscalls are implemented. It doesn't
* actually exercise their logic in any way.
*/
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 208452a93e2c..951fe855f7cd 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -11,6 +11,7 @@ tm-signal-context-chk-fpu
tm-signal-context-chk-gpr
tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
+tm-signal-context-force-tm
tm-signal-sigreturn-nt
tm-vmx-unavail
tm-unavailable
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 75a685359129..c0734ed0ef56 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -4,7 +4,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
- $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt
+ $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt \
+ tm-signal-context-force-tm
top_srcdir = ../../../../..
include ../../lib.mk
@@ -20,6 +21,7 @@ $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
$(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
+$(OUTPUT)/tm-signal-context-force-tm: CFLAGS += -pthread -m64
SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
$(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S
diff --git a/tools/testing/selftests/powerpc/tm/tm-exec.c b/tools/testing/selftests/powerpc/tm/tm-exec.c
index 3d27fa0ece04..260cfdb97d23 100644
--- a/tools/testing/selftests/powerpc/tm/tm-exec.c
+++ b/tools/testing/selftests/powerpc/tm/tm-exec.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Syscalls can be performed provided the transactions are suspended.
* The exec() class of syscall is unique as a new process is loaded.
*
diff --git a/tools/testing/selftests/powerpc/tm/tm-fork.c b/tools/testing/selftests/powerpc/tm/tm-fork.c
index 8d48579b7778..6efa5a685a77 100644
--- a/tools/testing/selftests/powerpc/tm/tm-fork.c
+++ b/tools/testing/selftests/powerpc/tm/tm-fork.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
*
* Edited: Rashmica Gupta, Nov 2015
*
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
index c760debbd5ad..d57c2d2ab6ec 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
index df91330a08ef..4d05f8b0254c 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
index f0ee55fd5185..48ad01499b1a 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
index b99c3d835957..8c8677a408bb 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016, Cyril Bur, IBM Corp.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- *
* Test the kernel's signal frame code.
*
* The kernel sets up two sets of ucontexts if the signal was to be
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
new file mode 100644
index 000000000000..31717625f318
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-context-force-tm.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018, Breno Leitao, Gustavo Romero, IBM Corp.
+ *
+ * This test raises a SIGUSR1 signal and toggles the MSR[TS]
+ * field in the signal handler. With MSR[TS] set, the kernel will
+ * force a recheckpoint, which may cause a segfault when returning to
+ * user space. Since the test needs to re-run, the segfault needs to be
+ * caught and handled.
+ *
+ * In order to continue the test even after a segfault, the context is
+ * saved prior to the signal being raised, and it is restored when there is
+ * a segmentation fault. This happens COUNT_MAX times.
+ *
+ * This test never fails (i.e. it never returns EXIT_FAILURE). It either
+ * succeeds, or crashes the kernel (if the kernel is buggy).
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <ucontext.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#include "tm.h"
+#include "utils.h"
+#include "reg.h"
+
+#define COUNT_MAX 5000 /* Number of iterations */
+
+/*
+ * This test only runs on 64-bit systems. Unset MSR_TS_S to avoid a
+ * compilation issue on 32-bit systems. There is no side effect, since the
+ * whole test is skipped if it is not running on a 64-bit system.
+ */
+#ifndef __powerpc64__
+#undef MSR_TS_S
+#define MSR_TS_S 0
+#endif
+
+/* Contexts saved so the test can recover and continue after it crashes */
+ucontext_t init_context, main_context;
+
+static int count, first_time;
+
+void usr_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = uc;
+ int ret;
+
+ /*
+ * Allocate memory in the signal handler and never free it, on
+ * purpose, to force the heap to grow; the memory leak is what
+ * we want here.
+ */
+ ucp->uc_link = mmap(NULL, sizeof(ucontext_t),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (ucp->uc_link == (void *)-1) {
+ perror("Mmap failed");
+ exit(-1);
+ }
+
+ /* Force the page to be allocated through a page fault */
+ ret = madvise(ucp->uc_link, sizeof(ucontext_t), MADV_DONTNEED);
+ if (ret) {
+ perror("madvise failed");
+ exit(-1);
+ }
+
+ memcpy(&ucp->uc_link->uc_mcontext, &ucp->uc_mcontext,
+ sizeof(ucp->uc_mcontext));
+
+ /* Force MSR[TS] to the suspended transaction state */
+ UCONTEXT_MSR(ucp) |= MSR_TS_S;
+
+ /*
+ * A fork inside a signal handler seems to be more efficient than a
+ * fork() prior to the signal being raised.
+ */
+ if (fork() == 0) {
+ /*
+ * Both child and parent will return, but the child returns
+ * with count set so it will exit on the next segfault.
+ * The parent will continue to loop.
+ */
+ count = COUNT_MAX;
+ }
+
+ /*
+ * If the change above does not hit the bug, it will cause a
+ * segmentation fault, since the ck structures are NULL.
+ */
+}
+
+void seg_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ if (count == COUNT_MAX) {
+ /* Return to tm_signal_context_force_tm() and exit */
+ setcontext(&main_context);
+ }
+
+ count++;
+
+ /* Reexecute the test */
+ setcontext(&init_context);
+}
+
+void tm_trap_test(void)
+{
+ struct sigaction usr_sa, seg_sa;
+ stack_t ss;
+
+ usr_sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+ usr_sa.sa_sigaction = usr_signal_handler;
+
+ seg_sa.sa_flags = SA_SIGINFO;
+ seg_sa.sa_sigaction = seg_signal_handler;
+
+ /*
+ * Set initial context. Will get back here from
+ * seg_signal_handler()
+ */
+ getcontext(&init_context);
+
+ /* Allocate an alternative signal stack area */
+ ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ ss.ss_size = SIGSTKSZ;
+ ss.ss_flags = 0;
+
+ if (ss.ss_sp == (void *)-1) {
+ perror("mmap error\n");
+ exit(-1);
+ }
+
+ /* Force the allocation through a page fault */
+ if (madvise(ss.ss_sp, SIGSTKSZ, MADV_DONTNEED)) {
+ perror("madvise\n");
+ exit(-1);
+ }
+
+ /* Setting an alternative stack to generate a page fault when
+ * the signal is raised.
+ */
+ if (sigaltstack(&ss, NULL)) {
+ perror("sigaltstack\n");
+ exit(-1);
+ }
+
+ /* The signal handler will enable MSR_TS */
+ sigaction(SIGUSR1, &usr_sa, NULL);
+ /* If it does not crash the kernel, it will segfault; catch it so we can retest */
+ sigaction(SIGSEGV, &seg_sa, NULL);
+
+ raise(SIGUSR1);
+}
+
+int tm_signal_context_force_tm(void)
+{
+ SKIP_IF(!have_htm());
+ /*
+ * Skip if not running on a 64-bit system, since it does not seem
+ * possible to set the TS bits in the mcontext MSR when the MSR is
+ * only 32 bits wide.
+ */
+ SKIP_IF(!is_ppc64le());
+
+ /* Will get back here after COUNT_MAX iterations */
+ getcontext(&main_context);
+
+ if (!first_time++)
+ tm_trap_test();
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+ return test_harness(tm_signal_context_force_tm, "tm_signal_context_force_tm");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
index 8c54d18b3e9a..4a61e9bd12b4 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
*
* Test the kernel's signal return code to ensure that it doesn't
* crash when both the transactional and suspend MSR bits are set in
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
index 1f0eb567438d..cdcf8c5bbbc7 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
*
* Test the kernel's signal delivery code to ensure that we don't
* treclaim twice in the kernel signal delivery code. This can happen
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal.S b/tools/testing/selftests/powerpc/tm/tm-signal.S
index 506a4ebaf3ae..c80c9136601b 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal.S
+++ b/tools/testing/selftests/powerpc/tm/tm-signal.S
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright 2015, Cyril Bur, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include "basic_asm.h"
diff --git a/tools/testing/selftests/powerpc/tm/tm-syscall.c b/tools/testing/selftests/powerpc/tm/tm-syscall.c
index 454b965a2db3..becb8207b432 100644
--- a/tools/testing/selftests/powerpc/tm/tm-syscall.c
+++ b/tools/testing/selftests/powerpc/tm/tm-syscall.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Sam Bobroff, IBM Corp.
- * Licensed under GPLv2.
*
* Test the kernel's system call code to ensure that a system call
* made from within an active HTM transaction is aborted with the
diff --git a/tools/testing/selftests/powerpc/tm/tm-tar.c b/tools/testing/selftests/powerpc/tm/tm-tar.c
index f31fe5a28ddb..03be8c47292b 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tar.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tar.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
* Original: Michael Neuling 19/7/2013
* Edited: Rashmica Gupta 01/12/2015
*
diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
index df1d7d4b1c89..17becf3dcee4 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
*
* Original: Michael Neuling 3/4/2014
* Modified: Rashmica Gupta 8/12/2015
@@ -21,7 +21,6 @@
* (a) begin transaction
* (b) abort transaction
* (c) check TEXASR to see if FS has been corrupted
- *
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
index 179d592f0073..601f0c1d450d 100644
--- a/tools/testing/selftests/powerpc/tm/tm-trap.c
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Gustavo Romero, IBM Corp.
- * Licensed under GPLv2.
*
* Check if thread endianness is flipped inadvertently to BE on trap
* caught in TM whilst MSR.FP and MSR.VEC are zero (i.e. just after
diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
index 09894f4ff62e..2ca2fccb0a3e 100644
--- a/tools/testing/selftests/powerpc/tm/tm-unavailable.c
+++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Gustavo Romero, Breno Leitao, Cyril Bur, IBM Corp.
- * Licensed under GPLv2.
*
* Force FP, VEC and VSX unavailable exception during transaction in all
* possible scenarios regarding the MSR.FP and MSR.VEC state, e.g. when FP
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
index 137185ba4937..e2a0c07e8362 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
* Original: Breno Leitao <brenohl@br.ibm.com> &
* Gustavo Bueno Romero <gromero@br.ibm.com>
* Edited: Michael Neuling
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
index fe52811584ae..147c6dc4eb0b 100644
--- a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
+++ b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2015, Michael Neuling, IBM Corp.
- * Licensed under GPLv2.
*
* Original: Michael Neuling 4/12/2013
* Edited: Rashmica Gupta 4/12/2015
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 5518b1d4ef8b..97f9f491c541 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2015, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#ifndef _SELFTESTS_POWERPC_TM_TM_H
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index ed62f4153d3e..c02d24835db4 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2013-2015, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
*/
#define _GNU_SOURCE /* For CPU_ZERO etc. */
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
index fb82068c9fda..18b885da01bd 100644
--- a/tools/testing/selftests/powerpc/vphn/Makefile
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
TEST_GEN_PROGS := test-vphn
CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.c b/tools/testing/selftests/powerpc/vphn/vphn.c
index 186b906e66d5..1d1f5f2be3b2 120000
--- a/tools/testing/selftests/powerpc/vphn/vphn.c
+++ b/tools/testing/selftests/powerpc/vphn/vphn.c
@@ -1 +1 @@
-../../../../../arch/powerpc/mm/vphn.c \ No newline at end of file
+../../../../../arch/powerpc/mm/book3s64/vphn.c \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/vphn/vphn.h b/tools/testing/selftests/powerpc/vphn/vphn.h
index 7131efe38c65..45fe160f8288 120000
--- a/tools/testing/selftests/powerpc/vphn/vphn.h
+++ b/tools/testing/selftests/powerpc/vphn/vphn.h
@@ -1 +1 @@
-../../../../../arch/powerpc/mm/vphn.h \ No newline at end of file
+../../../../../arch/powerpc/mm/book3s64/vphn.h \ No newline at end of file
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 29bac5ef9a93..444ad39d3700 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -2,6 +2,7 @@
/fd-002-posix-eq
/fd-003-kthread
/proc-loadavg-001
+/proc-pid-vm
/proc-self-map-files-001
/proc-self-map-files-002
/proc-self-syscall
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 434d033ee067..9f09fcd09ea3 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -Wall -O2 -Wno-unused-function
CFLAGS += -D_GNU_SOURCE
@@ -6,6 +7,7 @@ TEST_GEN_PROGS += fd-001-lookup
TEST_GEN_PROGS += fd-002-posix-eq
TEST_GEN_PROGS += fd-003-kthread
TEST_GEN_PROGS += proc-loadavg-001
+TEST_GEN_PROGS += proc-pid-vm
TEST_GEN_PROGS += proc-self-map-files-001
TEST_GEN_PROGS += proc-self-map-files-002
TEST_GEN_PROGS += proc-self-syscall
diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
index fcff7047000d..471e2aa28077 100644
--- a/tools/testing/selftests/proc/proc-loadavg-001.c
+++ b/tools/testing/selftests/proc/proc-loadavg-001.c
@@ -30,7 +30,7 @@ int main(void)
if (unshare(CLONE_NEWPID) == -1) {
if (errno == ENOSYS || errno == EPERM)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
new file mode 100644
index 000000000000..853aa164a401
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Fork and exec tiny 1 page executable which precisely controls its VM.
+ * Test /proc/$PID/maps
+ * Test /proc/$PID/smaps
+ * Test /proc/$PID/smaps_rollup
+ * Test /proc/$PID/statm
+ *
+ * FIXME require CONFIG_TMPFS which can be disabled
+ * FIXME test other values from "smaps"
+ * FIXME support other archs
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <linux/kdev_t.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
+{
+ return syscall(SYS_execveat, dirfd, pathname, argv, envp, flags);
+}
+
+static void make_private_tmp(void)
+{
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ exit(4);
+ }
+ exit(1);
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ exit(1);
+ }
+ if (mount(NULL, "/tmp", "tmpfs", 0, NULL) == -1) {
+ exit(1);
+ }
+}
+
+static pid_t pid = -1;
+static void ate(void)
+{
+ if (pid > 0) {
+ kill(pid, SIGTERM);
+ }
+}
+
+struct elf64_hdr {
+ uint8_t e_ident[16];
+ uint16_t e_type;
+ uint16_t e_machine;
+ uint32_t e_version;
+ uint64_t e_entry;
+ uint64_t e_phoff;
+ uint64_t e_shoff;
+ uint32_t e_flags;
+ uint16_t e_ehsize;
+ uint16_t e_phentsize;
+ uint16_t e_phnum;
+ uint16_t e_shentsize;
+ uint16_t e_shnum;
+ uint16_t e_shstrndx;
+};
+
+struct elf64_phdr {
+ uint32_t p_type;
+ uint32_t p_flags;
+ uint64_t p_offset;
+ uint64_t p_vaddr;
+ uint64_t p_paddr;
+ uint64_t p_filesz;
+ uint64_t p_memsz;
+ uint64_t p_align;
+};
+
+#ifdef __x86_64__
+#define PAGE_SIZE 4096
+#define VADDR (1UL << 32)
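+/* Column at which the pathname field starts in a /proc/$PID/maps line */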
+#define MAPS_OFFSET 73
+
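+/* Byte sequences of hand-assembled x86-64 instructions used to build the payload below */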
+#define syscall 0x0f, 0x05
+#define mov_rdi(x) \
+ 0x48, 0xbf, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_rsi(x) \
+ 0x48, 0xbe, \
+ (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff, \
+ ((x)>>32)&0xff, ((x)>>40)&0xff, ((x)>>48)&0xff, ((x)>>56)&0xff
+
+#define mov_eax(x) \
+ 0xb8, (x)&0xff, ((x)>>8)&0xff, ((x)>>16)&0xff, ((x)>>24)&0xff
+
+static const uint8_t payload[] = {
+ /* Casually unmap stack, vDSO and everything else. */
+ /* munmap */
+ mov_rdi(VADDR + 4096),
+ mov_rsi((1ULL << 47) - 4096 - VADDR - 4096),
+ mov_eax(11),
+ syscall,
+
+ /* Ping parent. */
+ /* write(0, &c, 1); */
+ 0x31, 0xff, /* xor edi, edi */
+ 0x48, 0x8d, 0x35, 0x00, 0x00, 0x00, 0x00, /* lea rsi, [rip] */
+ 0xba, 0x01, 0x00, 0x00, 0x00, /* mov edx, 1 */
+ mov_eax(1),
+ syscall,
+
+ /* 1: pause(); */
+ mov_eax(34),
+ syscall,
+
+ 0xeb, 0xf7, /* jmp 1b */
+};
+
+static int make_exe(const uint8_t *payload, size_t len)
+{
+ struct elf64_hdr h;
+ struct elf64_phdr ph;
+
+ struct iovec iov[3] = {
+ {&h, sizeof(struct elf64_hdr)},
+ {&ph, sizeof(struct elf64_phdr)},
+ {(void *)payload, len},
+ };
+ int fd, fd1;
+ char buf[64];
+
+ memset(&h, 0, sizeof(h));
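+ /*
+ * Minimal ELF64 header: ELFCLASS64 little-endian ident, e_type 2
+ * (ET_EXEC), e_machine 0x3e (EM_X86_64), one program header.
+ */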
+ h.e_ident[0] = 0x7f;
+ h.e_ident[1] = 'E';
+ h.e_ident[2] = 'L';
+ h.e_ident[3] = 'F';
+ h.e_ident[4] = 2;
+ h.e_ident[5] = 1;
+ h.e_ident[6] = 1;
+ h.e_ident[7] = 0;
+ h.e_type = 2;
+ h.e_machine = 0x3e;
+ h.e_version = 1;
+ h.e_entry = VADDR + sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr);
+ h.e_phoff = sizeof(struct elf64_hdr);
+ h.e_shoff = 0;
+ h.e_flags = 0;
+ h.e_ehsize = sizeof(struct elf64_hdr);
+ h.e_phentsize = sizeof(struct elf64_phdr);
+ h.e_phnum = 1;
+ h.e_shentsize = 0;
+ h.e_shnum = 0;
+ h.e_shstrndx = 0;
+
+ memset(&ph, 0, sizeof(ph));
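+ /* Single PT_LOAD (p_type 1) segment, readable and executable (p_flags PF_R|PF_X) */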
+ ph.p_type = 1;
+ ph.p_flags = (1<<2)|1;
+ ph.p_offset = 0;
+ ph.p_vaddr = VADDR;
+ ph.p_paddr = 0;
+ ph.p_filesz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+ ph.p_memsz = sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len;
+ ph.p_align = 4096;
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_EXCL|O_TMPFILE, 0700);
+ if (fd == -1) {
+ exit(1);
+ }
+
+ if (writev(fd, iov, 3) != sizeof(struct elf64_hdr) + sizeof(struct elf64_phdr) + len) {
+ exit(1);
+ }
+
+ /* Avoid ETXTBSY on exec. */
+ snprintf(buf, sizeof(buf), "/proc/self/fd/%u", fd);
+ fd1 = open(buf, O_RDONLY|O_CLOEXEC);
+ close(fd);
+
+ return fd1;
+}
+#endif
+
+static bool g_vsyscall = false;
+
+static const char str_vsyscall[] =
+"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]\n";
+
+#ifdef __x86_64__
+/*
+ * vsyscall page can't be unmapped, probe it with memory load.
+ */
+static void vsyscall(void)
+{
+ pid_t pid;
+ int wstatus;
+
+ pid = fork();
+ if (pid < 0) {
+ fprintf(stderr, "fork, errno %d\n", errno);
+ exit(1);
+ }
+ if (pid == 0) {
+ struct rlimit rlim = {0, 0};
+ (void)setrlimit(RLIMIT_CORE, &rlim);
+ *(volatile int *)0xffffffffff600000UL;
+ exit(0);
+ }
+ wait(&wstatus);
+ if (WIFEXITED(wstatus)) {
+ g_vsyscall = true;
+ }
+}
+
+int main(void)
+{
+ int pipefd[2];
+ int exec_fd;
+
+ vsyscall();
+
+ atexit(ate);
+
+ make_private_tmp();
+
+ /* Reserve fd 0 for 1-byte pipe ping from child. */
+ close(0);
+ if (open("/", O_RDONLY|O_DIRECTORY|O_PATH) != 0) {
+ return 1;
+ }
+
+ exec_fd = make_exe(payload, sizeof(payload));
+
+ if (pipe(pipefd) == -1) {
+ return 1;
+ }
+ if (dup2(pipefd[1], 0) != 0) {
+ return 1;
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ return 1;
+ }
+ if (pid == 0) {
+ sys_execveat(exec_fd, "", NULL, NULL, AT_EMPTY_PATH);
+ return 1;
+ }
+
+ char _;
+ if (read(pipefd[0], &_, 1) != 1) {
+ return 1;
+ }
+
+ struct stat st;
+ if (fstat(exec_fd, &st) == -1) {
+ return 1;
+ }
+
+ /* Generate "head -n1 /proc/$PID/maps" */
+ char buf0[256];
+ memset(buf0, ' ', sizeof(buf0));
+ int len = snprintf(buf0, sizeof(buf0),
+ "%08lx-%08lx r-xp 00000000 %02lx:%02lx %llu",
+ VADDR, VADDR + PAGE_SIZE,
+ MAJOR(st.st_dev), MINOR(st.st_dev),
+ (unsigned long long)st.st_ino);
+ buf0[len] = ' ';
+ snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
+ "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
+
+ /* Test /proc/$PID/maps */
+ {
+ const size_t len = strlen(buf0) + (g_vsyscall ? strlen(str_vsyscall) : 0);
+ char buf[256];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/maps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == len);
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+ if (g_vsyscall) {
+ assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0);
+ }
+ }
+
+ /* Test /proc/$PID/smaps */
+ {
+ char buf[4096];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(buf0));
+ assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+
+#define RSS1 "Rss: 4 kB\n"
+#define RSS2 "Rss: 0 kB\n"
+#define PSS1 "Pss: 4 kB\n"
+#define PSS2 "Pss: 0 kB\n"
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Size: 4 kB\n",
+ "KernelPageSize: 4 kB\n",
+ "MMUPageSize: 4 kB\n",
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+
+ if (g_vsyscall) {
+ assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall)));
+ }
+ }
+
+ /* Test /proc/$PID/smaps_rollup */
+ {
+ char bufr[256];
+ memset(bufr, ' ', sizeof(bufr));
+ len = snprintf(bufr, sizeof(bufr),
+ "%08lx-%08lx ---p 00000000 00:00 0",
+ VADDR, VADDR + PAGE_SIZE);
+ bufr[len] = ' ';
+ snprintf(bufr + MAPS_OFFSET, sizeof(bufr) - MAPS_OFFSET,
+ "[rollup]\n");
+
+ char buf[1024];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/smaps_rollup", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(0 <= rv && rv <= sizeof(buf));
+
+ assert(rv >= strlen(bufr));
+ assert(memcmp(buf, bufr, strlen(bufr)) == 0);
+
+ assert(memmem(buf, rv, RSS1, strlen(RSS1)) ||
+ memmem(buf, rv, RSS2, strlen(RSS2)));
+ assert(memmem(buf, rv, PSS1, strlen(PSS1)) ||
+ memmem(buf, rv, PSS2, strlen(PSS2)));
+
+ static const char *S[] = {
+ "Anonymous: 0 kB\n",
+ "AnonHugePages: 0 kB\n",
+ "Shared_Hugetlb: 0 kB\n",
+ "Private_Hugetlb: 0 kB\n",
+ "Locked: 0 kB\n",
+ };
+ int i;
+
+ for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+ assert(memmem(buf, rv, S[i], strlen(S[i])));
+ }
+ }
+
+ /* Test /proc/$PID/statm */
+ {
+ char buf[64];
+ ssize_t rv;
+ int fd;
+
+ snprintf(buf, sizeof(buf), "/proc/%u/statm", pid);
+ fd = open(buf, O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+ rv = read(fd, buf, sizeof(buf));
+ assert(rv == 7 * 2);
+
+ assert(buf[0] == '1'); /* ->total_vm */
+ assert(buf[1] == ' ');
+ assert(buf[2] == '0' || buf[2] == '1'); /* rss */
+ assert(buf[3] == ' ');
+ assert(buf[4] == '0' || buf[4] == '1'); /* file rss */
+ assert(buf[5] == ' ');
+ assert(buf[6] == '1'); /* ELF executable segments */
+ assert(buf[7] == ' ');
+ assert(buf[8] == '0');
+ assert(buf[9] == ' ');
+ assert(buf[10] == '0'); /* ->data_vm + ->stack_vm */
+ assert(buf[11] == ' ');
+ assert(buf[12] == '0');
+ assert(buf[13] == '\n');
+ }
+
+ return 0;
+}
+#else
+int main(void)
+{
+ return 4;
+}
+#endif
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 85744425b08d..47b7473dedef 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -46,12 +46,9 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
int main(void)
{
- const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
-#ifdef __arm__
- unsigned long va = 2 * PAGE_SIZE;
-#else
- unsigned long va = 0;
-#endif
+ const int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+ const unsigned long va_max = 1UL << 32;
+ unsigned long va;
void *p;
int fd;
unsigned long a, b;
@@ -60,10 +57,13 @@ int main(void)
if (fd == -1)
return 1;
- p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
- if (p == MAP_FAILED) {
- if (errno == EPERM)
- return 2;
+ for (va = 0; va < va_max; va += PAGE_SIZE) {
+ p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+ if (p == (void *)va)
+ break;
+ }
+ if (va == va_max) {
+ fprintf(stderr, "error: mmap doesn't like you\n");
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
index 5ab5f4810e43..9f6d000c0245 100644
--- a/tools/testing/selftests/proc/proc-self-syscall.c
+++ b/tools/testing/selftests/proc/proc-self-syscall.c
@@ -20,7 +20,6 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
-#include <unistd.h>
#include <string.h>
#include <stdio.h>
@@ -39,7 +38,7 @@ int main(void)
fd = open("/proc/self/syscall", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/proc-self-wchan.c b/tools/testing/selftests/proc/proc-self-wchan.c
index a38b2fbaa7ad..b467b98a457d 100644
--- a/tools/testing/selftests/proc/proc-self-wchan.c
+++ b/tools/testing/selftests/proc/proc-self-wchan.c
@@ -27,7 +27,7 @@ int main(void)
fd = open("/proc/self/wchan", O_RDONLY);
if (fd == -1) {
if (errno == ENOENT)
- return 2;
+ return 4;
return 1;
}
diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c
index 563e752e6eba..b3ef9e14d6cc 100644
--- a/tools/testing/selftests/proc/read.c
+++ b/tools/testing/selftests/proc/read.c
@@ -26,8 +26,10 @@
#include <dirent.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
+#include <sys/vfs.h>
#include <fcntl.h>
#include <unistd.h>
@@ -123,10 +125,22 @@ static void f(DIR *d, unsigned int level)
int main(void)
{
DIR *d;
+ struct statfs sfs;
d = opendir("/proc");
if (!d)
+ return 4;
+
+ /* Ensure /proc is proc. */
+ if (fstatfs(dirfd(d), &sfs) == -1) {
+ return 1;
+ }
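+ /* 0x9fa0 is PROC_SUPER_MAGIC */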
+ if (sfs.f_type != 0x9fa0) {
+ fprintf(stderr, "error: unexpected f_type %lx\n", (long)sfs.f_type);
return 2;
+ }
+
f(d, 0);
+
return 0;
}
diff --git a/tools/testing/selftests/pstore/common_tests b/tools/testing/selftests/pstore/common_tests
index 3ea64d7cf1cd..4509f0cc9c91 100755
--- a/tools/testing/selftests/pstore/common_tests
+++ b/tools/testing/selftests/pstore/common_tests
@@ -1,11 +1,11 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# common_tests - Shell script commonly used by pstore test scripts
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
-# Released under the terms of the GPL v2.
# Utilities
errexit() { # message
diff --git a/tools/testing/selftests/pstore/pstore_crash_test b/tools/testing/selftests/pstore/pstore_crash_test
index 1a4afe5c12b6..2a329bbb4aca 100755
--- a/tools/testing/selftests/pstore/pstore_crash_test
+++ b/tools/testing/selftests/pstore/pstore_crash_test
@@ -1,11 +1,11 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# pstore_crash_test - Pstore test shell script which causes crash and reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
-# Released under the terms of the GPL v2.
# exit if pstore backend is not registered
. ./common_tests
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
index 22f8df1ad7d4..d6da5e86efbf 100755
--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
+++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
@@ -1,11 +1,11 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# pstore_post_reboot_tests - Check pstore's behavior after crash/reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
-# Released under the terms of the GPL v2.
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
diff --git a/tools/testing/selftests/pstore/pstore_tests b/tools/testing/selftests/pstore/pstore_tests
index f25d2a349a60..1cef54458aff 100755
--- a/tools/testing/selftests/pstore/pstore_tests
+++ b/tools/testing/selftests/pstore/pstore_tests
@@ -1,11 +1,11 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# pstore_tests - Check pstore's behavior before crash/reboot
#
# Copyright (C) Hitachi Ltd., 2015
# Written by Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
#
-# Released under the terms of the GPL v2.
. ./common_tests
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index a5d8f0ab0da0..bd4a7247b44f 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP 1588 clock support - User space test program
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define _GNU_SOURCE
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
@@ -63,30 +50,6 @@ static clockid_t get_clockid(int fd)
return (((unsigned int) ~fd) << 3) | CLOCKFD;
}
-static void handle_alarm(int s)
-{
- printf("received signal %d\n", s);
-}
-
-static int install_handler(int signum, void (*handler)(int))
-{
- struct sigaction action;
- sigset_t mask;
-
- /* Unblock the signal. */
- sigemptyset(&mask);
- sigaddset(&mask, signum);
- sigprocmask(SIG_UNBLOCK, &mask, NULL);
-
- /* Install the signal handler. */
- action.sa_handler = handler;
- action.sa_flags = 0;
- sigemptyset(&action.sa_mask);
- sigaction(signum, &action, NULL);
-
- return 0;
-}
-
static long ppb_to_scaled_ppm(int ppb)
{
/*
@@ -112,8 +75,6 @@ static void usage(char *progname)
{
fprintf(stderr,
"usage: %s [options]\n"
- " -a val request a one-shot alarm after 'val' seconds\n"
- " -A val request a periodic alarm every 'val' seconds\n"
" -c query the ptp clock's capabilities\n"
" -d name device to open\n"
" -e val read 'val' external time stamp events\n"
@@ -148,15 +109,9 @@ int main(int argc, char *argv[])
struct ptp_pin_desc desc;
struct timespec ts;
struct timex tx;
-
- static timer_t timerid;
- struct itimerspec timeout;
- struct sigevent sigevent;
-
struct ptp_clock_time *pct;
struct ptp_sys_offset *sysoff;
-
char *progname;
unsigned int i;
int c, cnt, fd;
@@ -170,10 +125,8 @@ int main(int argc, char *argv[])
int gettime = 0;
int index = 0;
int list_pins = 0;
- int oneshot = 0;
int pct_offset = 0;
int n_samples = 0;
- int periodic = 0;
int perout = -1;
int pin_index = -1, pin_func;
int pps = -1;
@@ -185,14 +138,8 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
switch (c) {
- case 'a':
- oneshot = atoi(optarg);
- break;
- case 'A':
- periodic = atoi(optarg);
- break;
case 'c':
capabilities = 1;
break;
@@ -393,49 +340,6 @@ int main(int argc, char *argv[])
}
}
- if (oneshot) {
- install_handler(SIGALRM, handle_alarm);
- /* Create a timer. */
- sigevent.sigev_notify = SIGEV_SIGNAL;
- sigevent.sigev_signo = SIGALRM;
- if (timer_create(clkid, &sigevent, &timerid)) {
- perror("timer_create");
- return -1;
- }
- /* Start the timer. */
- memset(&timeout, 0, sizeof(timeout));
- timeout.it_value.tv_sec = oneshot;
- if (timer_settime(timerid, 0, &timeout, NULL)) {
- perror("timer_settime");
- return -1;
- }
- pause();
- timer_delete(timerid);
- }
-
- if (periodic) {
- install_handler(SIGALRM, handle_alarm);
- /* Create a timer. */
- sigevent.sigev_notify = SIGEV_SIGNAL;
- sigevent.sigev_signo = SIGALRM;
- if (timer_create(clkid, &sigevent, &timerid)) {
- perror("timer_create");
- return -1;
- }
- /* Start the timer. */
- memset(&timeout, 0, sizeof(timeout));
- timeout.it_interval.tv_sec = periodic;
- timeout.it_value.tv_sec = periodic;
- if (timer_settime(timerid, 0, &timeout, NULL)) {
- perror("timer_settime");
- return -1;
- }
- while (1) {
- pause();
- }
- timer_delete(timerid);
- }
-
if (perout >= 0) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
diff --git a/tools/testing/selftests/ptrace/Makefile b/tools/testing/selftests/ptrace/Makefile
index 8a2bc5562179..cb21c76a18ca 100644
--- a/tools/testing/selftests/ptrace/Makefile
+++ b/tools/testing/selftests/ptrace/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -iquote../../../../include/uapi -Wall
TEST_GEN_PROGS := peeksiginfo
diff --git a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
index 43540f1828cc..2deea2169fc2 100755
--- a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
+++ b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Extract the number of CPUs expected from the specified Kconfig-file
# fragment by checking CONFIG_SMP and CONFIG_NR_CPUS. If the specified
@@ -7,23 +8,9 @@
#
# Usage: configNR_CPUS.sh config-frag
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
cf=$1
if test ! -r $cf
diff --git a/tools/testing/selftests/rcutorture/bin/config_override.sh b/tools/testing/selftests/rcutorture/bin/config_override.sh
index ef7fcbac3d42..90016c359e83 100755
--- a/tools/testing/selftests/rcutorture/bin/config_override.sh
+++ b/tools/testing/selftests/rcutorture/bin/config_override.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# config_override.sh base override
#
@@ -6,23 +7,9 @@
# that conflict with any in override, concatenating what remains and
# sending the result to standard output.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2017
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
base=$1
if test -r $base
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index 197deece7c7c..31584cee84d7 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -1,23 +1,11 @@
#!/bin/bash
-# Usage: configcheck.sh .config .config-template
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0+
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
+# Usage: configcheck.sh .config .config-template
#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
trap 'rm -rf $T' 0
@@ -26,6 +14,7 @@ mkdir $T
cat $1 > $T/.config
cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' |
+grep -v '^CONFIG_INITRAMFS_SOURCE' |
awk '
{
print "if grep -q \"" $0 "\" < '"$T/.config"'";
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 65541c21a544..40359486b3a8 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Usage: configinit.sh config-spec-file build-output-dir results-dir
#
@@ -14,23 +15,9 @@
# for example, "O=/tmp/foo". If this argument is omitted, the .config
# file will be generated directly in the current directory.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/configinit.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
index bb99cde3f5f9..ff7102212703 100755
--- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh
+++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Get an estimate of how CPU-hoggy to be.
#
# Usage: cpus2use.sh
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
idlecpus=`mpstat | tail -1 | \
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
index 65f6655026f0..6bcb8b5b2ff2 100644
--- a/tools/testing/selftests/rcutorture/bin/functions.sh
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# bootparam_hotplug_cpu bootparam-string
#
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
index 3633828375e3..435b60933985 100755
--- a/tools/testing/selftests/rcutorture/bin/jitter.sh
+++ b/tools/testing/selftests/rcutorture/bin/jitter.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Alternate sleeping and spinning on randomly selected CPUs. The purpose
# of this script is to inflict random OS jitter on a concurrently running
@@ -11,23 +12,9 @@
# sleepmax: Maximum microseconds to sleep, defaults to one second.
# spinmax: Maximum microseconds to spin, defaults to one millisecond.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
me=$(($1 * 1000))
duration=$2
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 9115fcdb5617..c27a0bbb9c02 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Build a kvm-ready Linux kernel from the tree in the current directory.
#
# Usage: kvm-build.sh config-template build-dir resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
config_template=${1}
if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
index 98f650c9bf54..8426fe1f15ee 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh
@@ -1,4 +1,5 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
#
# Invoke a text editor on all console.log files for all runs with diagnostics,
# that is, on all such files having a console.log.diags counterpart.
@@ -10,6 +11,10 @@
#
# The "directory" above should end with the date/time directory, for example,
# "tools/testing/selftests/rcutorture/res/2018.02.25-14:27:27".
+#
+# Copyright (C) IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
rundir="${1}"
if test -z "$rundir" -o ! -d "$rundir"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 2de92f43ee8c..f3a7a5e2b89d 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for locktorture progress.
#
# Usage: kvm-recheck-lock.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 0fa8a61ccb7b..2a7f3f4756a7 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcutorture progress.
#
# Usage: kvm-recheck-rcu.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
index 8948f7926b21..7d3c2be66c64 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcuperf performance measurements,
# looking for ftrace data. Exits with 0 if data was found, analyzed, and
@@ -7,23 +8,9 @@
#
# Usage: kvm-recheck-rcuperf-ftrace.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
. functions.sh
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
index ccebf772fa1e..db0375a57f28 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -1,26 +1,13 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Analyze a given results directory for rcuperf performance measurements.
#
# Usage: kvm-recheck-rcuperf.sh resdir
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2016
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
i="$1"
if test -d "$i" -a -r "$i"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index c9bab57a77eb..2adde6aaafdb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Given the results directories for previous KVM-based torture runs,
# check the build and console output for errors. Given a directory
@@ -6,23 +7,9 @@
#
# Usage: kvm-recheck.sh resdir ...
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
. functions.sh
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 58ca758a5786..0eb1ec16d78a 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Run a kvm-based test of the specified tree on the specified configs.
# Fully automated run and error checking, no graphics console.
@@ -20,23 +21,9 @@
#
# More sophisticated argument parsing is clearly needed.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 19864f1cb27a..8f1e337b9b54 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Run a series of tests under KVM. By default, this series is specified
# by the relevant CFLIST file, but can be overridden by the --configs
@@ -6,23 +7,9 @@
#
# Usage: kvm.sh [ options ]
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
scriptname=$0
args="$*"
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index da298394daa2..6fa9bd1ddc09 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -1,21 +1,8 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Create an initrd directory if one does not already exist.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
# Author: Connor Shu <Connor.Shu@ibm.com>
@@ -40,17 +27,24 @@ mkdir $T
cat > $T/init << '__EOF___'
#!/bin/sh
# Run in userspace a few milliseconds every second. This helps to
-# exercise the NO_HZ_FULL portions of RCU.
+# exercise the NO_HZ_FULL portions of RCU. The 192 instances of "a" were
+# empirically shown to give a nice multi-millisecond burst of user-mode
+# execution on a 2GHz CPU, as desired. Modern CPUs will vary from a
+# couple of milliseconds up to perhaps 100 milliseconds, which is an
+# acceptable range.
+#
+# Why not calibrate an exact delay? Because within this initrd, we
+# are restricted to Bourne-shell builtins, which as far as I know do not
+# provide any means of obtaining a fine-grained timestamp.
+
+a4="a a a a"
+a16="$a4 $a4 $a4 $a4"
+a64="$a16 $a16 $a16 $a16"
+a192="$a64 $a64 $a64"
while :
do
q=
- for i in \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
- a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
+ for i in $a192
do
q="$q $i"
done
@@ -124,8 +118,8 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
| grep -q '^yes'; then
# architecture supported by nolibc
${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
- -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \
- -o init init.c
+ -nostdlib -include ../../../../include/nolibc/nolibc.h \
+ -lgcc -s -static -Os -o init init.c
else
${CROSS_COMPILE}gcc -s -static -Os -o init init.c
fi
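As a side note on the user-mode burst above: a minimal C sketch of the same burn-then-sleep pattern, of the kind the nolibc-built init could use, follows. This is illustrative only, not the actual init.c in the tree, and the iteration count is a placeholder that would need the same kind of empirical tuning as the 192-token shell loop.

#include <unistd.h>

int main(void)
{
	volatile unsigned long sink = 0;
	unsigned long i;

	for (;;) {
		/* Burn a few milliseconds of user-mode CPU time. */
		for (i = 0; i < 10 * 1000 * 1000; i++)
			sink += i;
		/* Then idle for roughly one second. */
		sleep(1);
	}
}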
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index 24fe5f822b28..0701b3bf6ade 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Check the build output from an rcutorture run for goodness.
# The "file" is a pathname on the local system, and "title" is
@@ -8,23 +9,9 @@
#
# Usage: parse-build.sh file title
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
F=$1
title=$2
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 84933f6aed77..4508373a922f 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Check the console output from an rcutorture run for oopses.
# The "file" is a pathname on the local system, and "title" is
@@ -6,23 +7,9 @@
#
# Usage: parse-console.sh file title
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2011
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
T=${TMPDIR-/tmp}/parse-console.sh.$$
file="$1"
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 80eb646e1319..d3e4b2971f92 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Kernel-version-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2014
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# locktorture_param_onoff bootparam-string config-file
#
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 7bab8246392b..effa415f9b92 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Kernel-version-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2013
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# rcutorture_param_n_barrier_cbs bootparam-string
#
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
index d36b8fd6f0fc..777d5b0c190f 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -1,24 +1,11 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
#
# Torture-suite-dependent shell functions for the rest of the scripts.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
# Copyright (C) IBM Corporation, 2015
#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
# per_version_boot_params bootparam-string config-file seconds
#
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
index d27285f8ee82..8bc960e5e713 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
@@ -59,11 +59,7 @@ typedef __u32 uint32_t;
*
* blkcnt_t is the type of the inode's block count.
*/
-#ifdef CONFIG_LBDAF
typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
/*
* The type of an index into the pagecache.
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index c30c52e1d0d2..d6469535630a 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0+ OR MIT
-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./
+
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+CLANG_FLAGS += -no-integrated-as
+endif
+
+CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \
+ $(CLANG_FLAGS)
LDLIBS += -lpthread
# Own dependencies because we only want to build against 1st prerequisite, but
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 3cea19877227..84f28f147fb6 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -5,7 +5,54 @@
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand
+ * value 0x5de3. This traps if user-space reaches this instruction by mistake,
+ * and the uncommon operand ensures the kernel does not move the instruction
+ * pointer to attacker-controlled code on rseq abort.
+ *
+ * The instruction pattern in the A32 instruction set is:
+ *
+ * e7f5def3 udf #24035 ; 0x5de3
+ *
+ * This translates to the following instruction pattern in the T16 instruction
+ * set:
+ *
+ * little endian:
+ * def3 udf #243 ; 0xf3
+ * e7f5 b.n <7f5>
+ *
+ * pre-ARMv6 big endian code:
+ * e7f5 b.n <7f5>
+ * def3 udf #243 ; 0xf3
+ *
+ * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian
+ * code and big-endian data. Ensure the RSEQ_SIG data signature matches code
+ * endianness. Prior to ARMv6, -mbig-endian generates big-endian code and data
+ * (which match), so there is no need to reverse the endianness of the data
+ * representation of the signature. However, the choice between BE32 and BE8
+ * is done by the linker, so we cannot know whether code and data endianness
+ * will be mixed before the linker is invoked.
+ */
+
+#define RSEQ_SIG_CODE 0xe7f5def3
+
+#ifndef __ASSEMBLER__
+
+#define RSEQ_SIG_DATA \
+ ({ \
+ int sig; \
+ asm volatile ("b 2f\n\t" \
+ "1: .inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
+ "2:\n\t" \
+ "ldr %[sig], 1b\n\t" \
+ : [sig] "=r" (sig)); \
+ sig; \
+ })
+
+#define RSEQ_SIG RSEQ_SIG_DATA
+
+#endif
#define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
#define rseq_smp_rmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
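For context on how this signature is used at run time: when a critical section aborts, the kernel checks that the 32-bit word immediately before the registered abort IP equals the signature passed to sys_rseq() before branching there, which is why the code and data views of the signature must agree. A small illustrative helper, not part of the selftests, that performs the same comparison from user space:

#include <stdint.h>
#include <string.h>

/* Return non-zero if the word just before abort_ip matches sig. */
static int rseq_abort_sig_ok(const void *abort_ip, uint32_t sig)
{
	uint32_t found;

	/* memcpy avoids making alignment assumptions about abort_ip - 4. */
	memcpy(&found, (const char *)abort_ip - sizeof(found), sizeof(found));
	return found == sig;
}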
@@ -30,18 +77,35 @@ do { \
#include "rseq-skip.h"
#else /* !RSEQ_SKIP_FASTPATH */
-#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".word " __rseq_str(label) "b, 0x0\n\t" \
".popsection\n\t"
-#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
- __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
"adr r0, " __rseq_str(cs_label) "\n\t" \
@@ -61,7 +125,8 @@ do { \
__rseq_str(table_label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
- ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ ".arm\n\t" \
+ ".inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
"b %l[" __rseq_str(abort_label) "]\n\t"
@@ -86,7 +151,12 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -148,7 +218,12 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -214,7 +289,10 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -266,7 +344,12 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -336,7 +419,12 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -407,7 +495,13 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -485,7 +579,12 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"str %[src], %[rseq_scratch0]\n\t"
"str %[dst], %[rseq_scratch1]\n\t"
"str %[len], %[rseq_scratch2]\n\t"
@@ -604,7 +703,12 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"str %[src], %[rseq_scratch0]\n\t"
"str %[dst], %[rseq_scratch1]\n\t"
"str %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h
index 954f34671ca6..200dae9e4208 100644
--- a/tools/testing/selftests/rseq/rseq-arm64.h
+++ b/tools/testing/selftests/rseq/rseq-arm64.h
@@ -6,7 +6,20 @@
* (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
*/
-#define RSEQ_SIG 0xd428bc00 /* BRK #0x45E0 */
+/*
+ * aarch64 -mbig-endian generates mixed endianness code vs data:
+ * little-endian code and big-endian data. Ensure the RSEQ_SIG signature
+ * matches code endianness.
+ */
+#define RSEQ_SIG_CODE 0xd428bc00 /* BRK #0x45E0. */
+
+#ifdef __AARCH64EB__
+#define RSEQ_SIG_DATA 0x00bc28d4 /* BRK #0x45E0. */
+#else
+#define RSEQ_SIG_DATA RSEQ_SIG_CODE
+#endif
+
+#define RSEQ_SIG RSEQ_SIG_DATA
#define rseq_smp_mb() __asm__ __volatile__ ("dmb ish" ::: "memory")
#define rseq_smp_rmb() __asm__ __volatile__ ("dmb ishld" ::: "memory")
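A hedged compile-time sanity check for the relationship described above, illustrative only and not part of the selftests; the RSEQ_BSWAP32_CONST helper is invented here. On a big-endian aarch64 build, the data-side signature should be the byte-swapped BRK encoding:

/* Byte-swap a 32-bit constant without relying on compiler builtins. */
#define RSEQ_BSWAP32_CONST(x)						\
	((((x) & 0x000000ffu) << 24) | (((x) & 0x0000ff00u) << 8) |	\
	 (((x) & 0x00ff0000u) >> 8)  | (((x) & 0xff000000u) >> 24))

#ifdef __AARCH64EB__
_Static_assert(RSEQ_SIG_DATA == RSEQ_BSWAP32_CONST(RSEQ_SIG_CODE),
	       "RSEQ_SIG_DATA must be the byte-swapped BRK pattern");
#endif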
@@ -82,19 +95,35 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- " .pushsection __rseq_table, \"aw\"\n" \
+ " .pushsection __rseq_cs, \"aw\"\n" \
" .balign 32\n" \
__rseq_str(label) ":\n" \
" .long " __rseq_str(version) ", " __rseq_str(flags) "\n" \
" .quad " __rseq_str(start_ip) ", " \
__rseq_str(post_commit_offset) ", " \
__rseq_str(abort_ip) "\n" \
+ " .popsection\n\t" \
+ " .pushsection __rseq_cs_ptr_array, \"aw\"\n" \
+ " .quad " __rseq_str(label) "b\n" \
" .popsection\n"
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ " .pushsection __rseq_exit_point_array, \"aw\"\n" \
+ " .quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n" \
+ " .popsection\n"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
" adrp " RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \
@@ -105,7 +134,7 @@ do { \
#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \
" b 222f\n" \
- " .inst " __rseq_str(RSEQ_SIG) "\n" \
+ " .inst " __rseq_str(RSEQ_SIG_CODE) "\n" \
__rseq_str(label) ":\n" \
" b %l[" __rseq_str(abort_label) "]\n" \
"222:\n"
@@ -182,6 +211,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -231,6 +265,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -282,6 +321,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -325,6 +367,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -379,6 +426,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -433,6 +485,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -490,6 +548,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
@@ -545,6 +608,11 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
RSEQ_INJECT_ASM(3)
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
index 7f48ecf46994..e989e7c14b09 100644
--- a/tools/testing/selftests/rseq/rseq-mips.h
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -7,7 +7,39 @@
* (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the break instruction. The instruction pattern is:
+ *
+ * On MIPS:
+ * 0350000d break 0x350
+ *
+ * On nanoMIPS:
+ * 00100350 break 0x350
+ *
+ * On microMIPS:
+ * 0000d407 break 0x350
+ *
+ * For nanoMIPS32 and microMIPS, the instruction stream is encoded as 16-bit
+ * halfwords, so the signature halfwords need to be swapped accordingly for
+ * little-endian.
+ */
+#if defined(__nanomips__)
+# ifdef __MIPSEL__
+# define RSEQ_SIG 0x03500010
+# else
+# define RSEQ_SIG 0x00100350
+# endif
+#elif defined(__mips_micromips)
+# ifdef __MIPSEL__
+# define RSEQ_SIG 0xd4070000
+# else
+# define RSEQ_SIG 0x0000d407
+# endif
+#elif defined(__mips__)
+# define RSEQ_SIG 0x0350000d
+#else
+/* Unknown MIPS architecture. */
+#endif
#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory")
#define rseq_smp_rmb() rseq_smp_mb()
@@ -54,20 +86,38 @@ do { \
# error unsupported _MIPS_SZLONG
#endif
-#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
+ __rseq_str(label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(label) "b") "\n\t" \
".popsection\n\t"
-#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
- __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(exit_ip)) "\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
@@ -113,7 +163,12 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -173,7 +228,12 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -237,7 +297,10 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -289,7 +352,12 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -357,7 +425,12 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -426,7 +499,13 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -500,7 +579,12 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
@@ -616,7 +700,12 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
rseq_workaround_gcc_asm_size_guess();
__asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h
index 52630c9f42be..76be90196fe4 100644
--- a/tools/testing/selftests/rseq/rseq-ppc.h
+++ b/tools/testing/selftests/rseq/rseq-ppc.h
@@ -6,7 +6,15 @@
* (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com>
*/
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG is used with the following trap instruction:
+ *
+ * powerpc-be: 0f e5 00 0b twui r5,11
+ * powerpc64-le: 0b 00 e5 0f twui r5,11
+ * powerpc64-be: 0f e5 00 0b twui r5,11
+ */
+
+#define RSEQ_SIG 0x0fe5000b
#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory", "cc")
#define rseq_smp_lwsync() __asm__ __volatile__ ("lwsync" ::: "memory", "cc")
@@ -33,8 +41,8 @@ do { \
#else /* !RSEQ_SKIP_FASTPATH */
/*
- * The __rseq_table section can be used by debuggers to better handle
- * single-stepping through the restartable critical sections.
+ * The __rseq_cs_ptr_array and __rseq_cs sections can be used by debuggers to
+ * better handle single-stepping through the restartable critical sections.
*/
#ifdef __PPC64__
@@ -46,11 +54,14 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
".popsection\n\t"
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
@@ -63,6 +74,19 @@ do { \
"std %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \
__rseq_str(label) ":\n\t"
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
+ ".popsection\n\t"
+
#else /* #ifdef __PPC64__ */
#define STORE_WORD "stw "
@@ -72,12 +96,29 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
/* 32-bit only supported on BE */ \
".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ /* 32-bit only supported on BE */ \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
@@ -169,6 +210,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -224,6 +270,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -286,6 +337,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -337,6 +391,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -400,6 +459,11 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -465,6 +529,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
/* cmp cpuid */
@@ -532,6 +602,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* setup for memcpy */
"mr %%r19, %[len]\n\t"
"mr %%r20, %[src]\n\t"
@@ -601,6 +676,11 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* setup for memcpy */
"mr %%r19, %[len]\n\t"
"mr %%r20, %[src]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 1069e85258ce..8ef94ad1cbb4 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
-#define RSEQ_SIG 0x53053053
+/*
+ * RSEQ_SIG uses the trap4 instruction. As Linux makes use of neither the
+ * access-register mode nor the linkage stack, this instruction will always
+ * cause a special-operation exception (the trap-enabled bit in the DUCT
+ * is and will stay 0). The instruction pattern is:
+ * b2 ff 0f ff trap4 4095(%r0)
+ */
+#define RSEQ_SIG 0xB2FF0FFF
#define rseq_smp_mb() __asm__ __volatile__ ("bcr 15,0" ::: "memory")
#define rseq_smp_rmb() rseq_smp_mb()
@@ -37,22 +44,54 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#elif __s390__
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(label) "b\n\t" \
+ ".popsection\n\t"
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \
".popsection\n\t"
#define LONG_L "l"
@@ -85,14 +124,14 @@ do { \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
- "j %l[" __rseq_str(abort_label) "]\n\t" \
+ "jg %l[" __rseq_str(abort_label) "]\n\t" \
".popsection\n\t"
#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
- "j %l[" __rseq_str(cmpfail_label) "]\n\t" \
+ "jg %l[" __rseq_str(cmpfail_label) "]\n\t" \
".popsection\n\t"
static inline __attribute__((always_inline))
@@ -102,6 +141,11 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -160,6 +204,11 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -220,6 +269,9 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -268,6 +320,11 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -339,6 +396,12 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
@@ -407,6 +470,11 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
LONG_S " %[src], %[rseq_scratch0]\n\t"
LONG_S " %[dst], %[rseq_scratch1]\n\t"
LONG_S " %[len], %[rseq_scratch2]\n\t"
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
index 089410a314e9..b2da6004fe30 100644
--- a/tools/testing/selftests/rseq/rseq-x86.h
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -7,8 +7,25 @@
#include <stdint.h>
+/*
+ * RSEQ_SIG is used with the following reserved undefined instructions, which
+ * trap in user-space:
+ *
+ * x86-32: 0f b9 3d 53 30 05 53 ud1 0x53053053,%edi
+ * x86-64: 0f b9 3d 53 30 05 53 ud1 0x53053053(%rip),%edi
+ */
#define RSEQ_SIG 0x53053053
+/*
+ * Due to a compiler optimization bug in gcc-8 with asm goto and TLS asm input
+ * operands, we cannot use "m" input operands; instead, the __rseq_abi
+ * address is passed through an "r" input operand.
+ */
+
+/* Offset of cpu_id and rseq_cs fields in struct rseq. */
+#define RSEQ_CPU_ID_OFFSET 4
+#define RSEQ_CS_OFFSET 8
+
#ifdef __x86_64__
#define rseq_smp_mb() \
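Since the asm now hard-codes RSEQ_CPU_ID_OFFSET and RSEQ_CS_OFFSET rather than taking "m" operands, those constants must track the struct rseq layout from the UAPI header. A hedged compile-time check one could add, assuming <linux/rseq.h> is available and defines struct rseq with cpu_id and rseq_cs members as the selftests already expect:

#include <stddef.h>
#include <linux/rseq.h>

/* Catch any future struct rseq layout change at build time. */
_Static_assert(offsetof(struct rseq, cpu_id) == RSEQ_CPU_ID_OFFSET,
	       "RSEQ_CPU_ID_OFFSET out of sync with struct rseq");
_Static_assert(offsetof(struct rseq, rseq_cs) == RSEQ_CS_OFFSET,
	       "RSEQ_CS_OFFSET out of sync with struct rseq");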
@@ -37,32 +54,49 @@ do { \
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(label) "b\n\t" \
".popsection\n\t"
+
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
"leaq " __rseq_str(cs_label) "(%%rip), %%rax\n\t" \
- "movq %%rax, %[" __rseq_str(rseq_cs) "]\n\t" \
+ "movq %%rax, " __rseq_str(rseq_cs) "\n\t" \
__rseq_str(label) ":\n\t"
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
RSEQ_INJECT_ASM(2) \
- "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \
"jnz " __rseq_str(label) "\n\t"
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
- /* Disassembler-friendly signature: nopl <sig>(%rip). */\
- ".byte 0x0f, 0x1f, 0x05\n\t" \
+ /* Disassembler-friendly signature: ud1 <sig>(%rip),%edi. */ \
+ ".byte 0x0f, 0xb9, 0x3d\n\t" \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
@@ -83,15 +117,20 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -102,8 +141,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
[v] "m" (*v),
[expect] "r" (expect),
[newv] "r" (newv)
@@ -140,16 +178,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movq %[v], %%rbx\n\t"
"cmpq %%rbx, %[expectnot]\n\t"
"je %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movq %[v], %%rbx\n\t"
"cmpq %%rbx, %[expectnot]\n\t"
"je %l[error2]\n\t"
@@ -164,8 +207,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expectnot] "r" (expectnot),
@@ -199,12 +241,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
#endif
/* final store */
"addq %[count], %[v]\n\t"
@@ -213,8 +258,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[count] "er" (count)
@@ -244,15 +288,20 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -266,8 +315,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "r" (newv2),
@@ -314,9 +362,15 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
@@ -325,7 +379,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(5)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpq %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
"cmpq %[v2], %[expect2]\n\t"
@@ -338,8 +392,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* cmp2 input */
[v2] "m" (*v2),
[expect2] "r" (expect2),
@@ -381,18 +434,23 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movq %[src], %[rseq_scratch0]\n\t"
"movq %[dst], %[rseq_scratch1]\n\t"
"movq %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpq %[v], %[expect]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"cmpq %[v], %[expect]\n\t"
"jnz 7f\n\t"
#endif
@@ -440,8 +498,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "r" (expect),
@@ -520,31 +577,47 @@ do { \
*/
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
- ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".pushsection __rseq_cs, \"aw\"\n\t" \
".balign 32\n\t" \
__rseq_str(label) ":\n\t" \
".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+ ".popsection\n\t" \
+ ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \
+ ".long " __rseq_str(label) "b, 0x0\n\t" \
".popsection\n\t"
#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
(post_commit_ip - start_ip), abort_ip)
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
+ */
+#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \
+ ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \
+ ".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \
+ ".popsection\n\t"
+
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
- "movl $" __rseq_str(cs_label) ", %[rseq_cs]\n\t" \
+ "movl $" __rseq_str(cs_label) ", " __rseq_str(rseq_cs) "\n\t" \
__rseq_str(label) ":\n\t"
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
RSEQ_INJECT_ASM(2) \
- "cmpl %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \
"jnz " __rseq_str(label) "\n\t"
#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \
".pushsection __rseq_failure, \"ax\"\n\t" \
- /* Disassembler-friendly signature: nopl <sig>. */ \
- ".byte 0x0f, 0x1f, 0x05\n\t" \
+ /* Disassembler-friendly signature: ud1 <sig>,%edi. */ \
+ ".byte 0x0f, 0xb9, 0x3d\n\t" \
".long " __rseq_str(RSEQ_SIG) "\n\t" \
__rseq_str(label) ":\n\t" \
teardown \
@@ -565,15 +638,20 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -584,8 +662,7 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
[v] "m" (*v),
[expect] "r" (expect),
[newv] "r" (newv)
@@ -622,16 +699,21 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[v], %%ebx\n\t"
"cmpl %%ebx, %[expectnot]\n\t"
"je %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movl %[v], %%ebx\n\t"
"cmpl %%ebx, %[expectnot]\n\t"
"je %l[error2]\n\t"
@@ -646,8 +728,7 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expectnot] "r" (expectnot),
@@ -681,12 +762,15 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
#endif
/* final store */
"addl %[count], %[v]\n\t"
@@ -695,8 +779,7 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[count] "ir" (count)
@@ -726,15 +809,20 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
#endif
@@ -749,8 +837,7 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "m" (newv2),
@@ -788,16 +875,21 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %[v], %%eax\n\t"
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"movl %[expect], %%eax\n\t"
"cmpl %[v], %%eax\n\t"
"jnz %l[error2]\n\t"
@@ -813,8 +905,7 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* try store input */
[v2] "m" (*v2),
[newv2] "r" (newv2),
@@ -853,9 +944,15 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"cmpl %[v], %[expect]\n\t"
"jnz %l[cmpfail]\n\t"
@@ -864,7 +961,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
"jnz %l[cmpfail]\n\t"
RSEQ_INJECT_ASM(5)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1])
"cmpl %[v], %[expect]\n\t"
"jnz %l[error2]\n\t"
"cmpl %[expect2], %[v2]\n\t"
@@ -878,8 +975,7 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, "", abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* cmp2 input */
[v2] "m" (*v2),
[expect2] "r" (expect2),
@@ -922,19 +1018,24 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movl %[src], %[rseq_scratch0]\n\t"
"movl %[dst], %[rseq_scratch1]\n\t"
"movl %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 7f\n\t"
@@ -984,8 +1085,7 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "m" (expect),
@@ -1030,19 +1130,24 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
__asm__ __volatile__ goto (
RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
"movl %[src], %[rseq_scratch0]\n\t"
"movl %[dst], %[rseq_scratch1]\n\t"
"movl %[len], %[rseq_scratch2]\n\t"
/* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_CS_OFFSET(%[rseq_abi]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 4f)
RSEQ_INJECT_ASM(3)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 5f\n\t"
RSEQ_INJECT_ASM(4)
#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), 6f)
"movl %[expect], %%eax\n\t"
"cmpl %%eax, %[v]\n\t"
"jnz 7f\n\t"
@@ -1093,8 +1198,7 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
#endif
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [rseq_abi] "r" (&__rseq_abi),
/* final store input */
[v] "m" (*v),
[expect] "m" (expect),
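
The x86 rseq macro changes above do three things: the critical-section
descriptors move from the old __rseq_table section into __rseq_cs plus a
__rseq_cs_ptr_array of pointers, each inline-asm block now records its
(start_ip, exit_ip) pairs in a new __rseq_exit_point_array so debuggers can
step over the critical section, and the abort signature becomes a ud1
instruction so disassemblers show a trap rather than a nop. Below is a
minimal sketch of how the exit-point array could be walked from C; the
struct layout mirrors the ".long start_ip, 0x0, exit_ip, 0x0" directives
emitted above, and the __start_/__stop_ symbol names are an assumption
based on the section having a C-identifier name, not something defined by
this patch.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the ".long start_ip, 0x0, exit_ip, 0x0" layout emitted above. */
	struct rseq_exit_point {
		uint64_t start_ip;
		uint64_t exit_ip;
	};

	/*
	 * Linker-generated section bounds (assumed names); declared weak so
	 * this still links when no critical section was emitted.
	 */
	extern struct rseq_exit_point __start___rseq_exit_point_array[] __attribute__((weak));
	extern struct rseq_exit_point __stop___rseq_exit_point_array[] __attribute__((weak));

	static void dump_exit_points(void)
	{
		struct rseq_exit_point *p;

		for (p = __start___rseq_exit_point_array;
		     p < __stop___rseq_exit_point_array; p++)
			printf("cs start %#llx -> exit %#llx\n",
			       (unsigned long long)p->start_ip,
			       (unsigned long long)p->exit_ip);
	}
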
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 4847e97ed049..7159eb777fd3 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -25,18 +25,27 @@
#include <syscall.h>
#include <assert.h>
#include <signal.h>
+#include <limits.h>
#include "rseq.h"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-__attribute__((tls_model("initial-exec"))) __thread
-volatile struct rseq __rseq_abi = {
+__thread volatile struct rseq __rseq_abi = {
.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};
-static __attribute__((tls_model("initial-exec"))) __thread
-volatile int refcount;
+/*
+ * Shared with other libraries. This library may take rseq ownership if it is
+ * still 0 when executing the library constructor. Set to 1 by library
+ * constructor when handling rseq. Set to 0 in destructor if handling rseq.
+ */
+int __rseq_handled;
+
+/* Whether this library has ownership of rseq registration. */
+static int rseq_ownership;
+
+static __thread volatile uint32_t __rseq_refcount;
static void signal_off_save(sigset_t *oldset)
{
@@ -69,8 +78,14 @@ int rseq_register_current_thread(void)
int rc, ret = 0;
sigset_t oldset;
+ if (!rseq_ownership)
+ return 0;
signal_off_save(&oldset);
- if (refcount++)
+ if (__rseq_refcount == UINT_MAX) {
+ ret = -1;
+ goto end;
+ }
+ if (__rseq_refcount++)
goto end;
rc = sys_rseq(&__rseq_abi, sizeof(struct rseq), 0, RSEQ_SIG);
if (!rc) {
@@ -78,9 +93,9 @@ int rseq_register_current_thread(void)
goto end;
}
if (errno != EBUSY)
- __rseq_abi.cpu_id = -2;
+ __rseq_abi.cpu_id = RSEQ_CPU_ID_REGISTRATION_FAILED;
ret = -1;
- refcount--;
+ __rseq_refcount--;
end:
signal_restore(oldset);
return ret;
@@ -91,13 +106,20 @@ int rseq_unregister_current_thread(void)
int rc, ret = 0;
sigset_t oldset;
+ if (!rseq_ownership)
+ return 0;
signal_off_save(&oldset);
- if (--refcount)
+ if (!__rseq_refcount) {
+ ret = -1;
+ goto end;
+ }
+ if (--__rseq_refcount)
goto end;
rc = sys_rseq(&__rseq_abi, sizeof(struct rseq),
RSEQ_FLAG_UNREGISTER, RSEQ_SIG);
if (!rc)
goto end;
+ __rseq_refcount = 1;
ret = -1;
end:
signal_restore(oldset);
@@ -115,3 +137,20 @@ int32_t rseq_fallback_current_cpu(void)
}
return cpu;
}
+
+void __attribute__((constructor)) rseq_init(void)
+{
+ /* Check whether rseq is handled by another library. */
+ if (__rseq_handled)
+ return;
+ __rseq_handled = 1;
+ rseq_ownership = 1;
+}
+
+void __attribute__((destructor)) rseq_fini(void)
+{
+ if (!rseq_ownership)
+ return;
+ __rseq_handled = 0;
+ rseq_ownership = 0;
+}
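
The rseq.c changes above turn registration into a cooperative scheme:
__rseq_handled is a process-wide flag shared with other libraries, the
constructor claims ownership only while the flag is still 0, and both
register and unregister become no-ops when this library does not own the
registration (the per-thread refcount also gains overflow and underflow
checks). A hedged sketch of how a second user-space library could honour
the same handshake follows; my_lib_init is a made-up name and the snippet
assumes it is linked together with the selftest's rseq.c shown above.

	/* Hypothetical constructor in another library that also wants rseq. */
	extern int __rseq_handled;	/* exported by rseq.c above */

	__attribute__((constructor))
	static void my_lib_init(void)
	{
		if (__rseq_handled)
			return;	/* someone else registers rseq; just read __rseq_abi */
		__rseq_handled = 1;
		/* ... this library would then issue the per-thread sys_rseq() calls ... */
	}
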
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index c72eb70f9b52..d40d60e7499e 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -16,7 +16,6 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sched.h>
#include <linux/rseq.h>
/*
@@ -45,6 +44,7 @@
#endif
extern __thread volatile struct rseq __rseq_abi;
+extern int __rseq_handled;
#define rseq_likely(x) __builtin_expect(!!(x), 1)
#define rseq_unlikely(x) __builtin_expect(!!(x), 0)
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
index 3acd6d75ff9f..e426304fd4a0 100755
--- a/tools/testing/selftests/rseq/run_param_test.sh
+++ b/tools/testing/selftests/rseq/run_param_test.sh
@@ -1,6 +1,8 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0+ or MIT
+NR_CPUS=`grep '^processor' /proc/cpuinfo | wc -l`
+
EXTRA_ARGS=${@}
OLDIFS="$IFS"
@@ -28,15 +30,16 @@ IFS="$OLDIFS"
REPS=1000
SLOW_REPS=100
+NR_THREADS=$((6*${NR_CPUS}))
function do_tests()
{
local i=0
while [ "$i" -lt "${#TEST_LIST[@]}" ]; do
echo "Running test ${TEST_NAME[$i]}"
- ./param_test ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1
+ ./param_test ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
echo "Running compare-twice test ${TEST_NAME[$i]}"
- ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} ${@} ${EXTRA_ARGS} || exit 1
+ ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
let "i++"
done
}
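
run_param_test.sh now derives NR_CPUS from /proc/cpuinfo and passes
-t $((6 * NR_CPUS)) to both test binaries, so the thread count scales with
the machine instead of relying on the binaries' built-in default. For
reference, the equivalent scaling in C, as a small sketch (sysconf is the
portable way to query the online CPU count):

	#include <unistd.h>

	/* Same 6-threads-per-CPU scaling as run_param_test.sh. */
	static long default_nr_threads(void)
	{
		long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);

		if (nr_cpus < 1)
			nr_cpus = 1;	/* fall back to a single CPU */
		return 6 * nr_cpus;
	}
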
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index b2065536d407..66af608fb4c6 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -49,7 +49,7 @@ TEST_F(rtc, date_read) {
rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec);
}
-TEST_F(rtc, uie_read) {
+TEST_F_TIMEOUT(rtc, uie_read, NUM_UIE + 2) {
int i, rc, irq = 0;
unsigned long data;
@@ -211,7 +211,7 @@ TEST_F(rtc, alarm_wkalm_set) {
ASSERT_EQ(new, secs);
}
-TEST_F(rtc, alarm_alm_set_minute) {
+TEST_F_TIMEOUT(rtc, alarm_alm_set_minute, 65) {
struct timeval tv = { .tv_sec = 62 };
unsigned long data;
struct rtc_time tm;
@@ -264,7 +264,7 @@ TEST_F(rtc, alarm_alm_set_minute) {
ASSERT_EQ(new, secs);
}
-TEST_F(rtc, alarm_wkalm_set_minute) {
+TEST_F_TIMEOUT(rtc, alarm_wkalm_set_minute, 65) {
struct timeval tv = { .tv_sec = 62 };
struct rtc_wkalrm alarm = { 0 };
struct rtc_time tm;
diff --git a/tools/testing/selftests/rtc/setdate.c b/tools/testing/selftests/rtc/setdate.c
index 2cb78489eca4..b303890b3de2 100644
--- a/tools/testing/selftests/rtc/setdate.c
+++ b/tools/testing/selftests/rtc/setdate.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* Real Time Clock Driver Test
* by: Benjamin Gaignard (benjamin.gaignard@linaro.org)
*
* To build
* gcc rtctest_setdate.c -o rtctest_setdate
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/safesetid/.gitignore b/tools/testing/selftests/safesetid/.gitignore
new file mode 100644
index 000000000000..9c1a629bca01
--- /dev/null
+++ b/tools/testing/selftests/safesetid/.gitignore
@@ -0,0 +1 @@
+safesetid-test
diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile
new file mode 100644
index 000000000000..98da7a504737
--- /dev/null
+++ b/tools/testing/selftests/safesetid/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for mount selftests.
+CFLAGS = -Wall -lcap -O2
+
+TEST_PROGS := run_tests.sh
+TEST_GEN_FILES := safesetid-test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/safesetid/config b/tools/testing/selftests/safesetid/config
new file mode 100644
index 000000000000..9d44e5c2e096
--- /dev/null
+++ b/tools/testing/selftests/safesetid/config
@@ -0,0 +1,2 @@
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c
new file mode 100644
index 000000000000..892c8e8b1b8b
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <pwd.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/capability.h>
+#include <sys/types.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdarg.h>
+
+#ifndef CLONE_NEWUSER
+# define CLONE_NEWUSER 0x10000000
+#endif
+
+#define ROOT_USER 0
+#define RESTRICTED_PARENT 1
+#define ALLOWED_CHILD1 2
+#define ALLOWED_CHILD2 3
+#define NO_POLICY_USER 4
+
+char* add_whitelist_policy_file = "/sys/kernel/security/safesetid/add_whitelist_policy";
+
+static void die(char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
+static bool vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
+{
+ char buf[4096];
+ int fd;
+ ssize_t written;
+ int buf_len;
+
+ buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
+ if (buf_len < 0) {
+ printf("vsnprintf failed: %s\n",
+ strerror(errno));
+ return false;
+ }
+ if (buf_len >= sizeof(buf)) {
+ printf("vsnprintf output truncated\n");
+ return false;
+ }
+
+ fd = open(filename, O_WRONLY);
+ if (fd < 0) {
+ if ((errno == ENOENT) && enoent_ok)
+ return true;
+ return false;
+ }
+ written = write(fd, buf, buf_len);
+ if (written != buf_len) {
+ if (written >= 0) {
+ printf("short write to %s\n", filename);
+ return false;
+ } else {
+ printf("write to %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ }
+ if (close(fd) != 0) {
+ printf("close of %s failed: %s\n",
+ filename, strerror(errno));
+ return false;
+ }
+ return true;
+}
+
+static bool write_file(char *filename, char *fmt, ...)
+{
+ va_list ap;
+ bool ret;
+
+ va_start(ap, fmt);
+ ret = vmaybe_write_file(false, filename, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+
+static void ensure_user_exists(uid_t uid)
+{
+ struct passwd p;
+
+ FILE *fd;
+ char name_str[10];
+
+ if (getpwuid(uid) == NULL) {
+ memset(&p,0x00,sizeof(p));
+ fd=fopen("/etc/passwd","a");
+ if (fd == NULL)
+ die("couldn't open file\n");
+ if (fseek(fd, 0, SEEK_END))
+ die("couldn't fseek\n");
+ snprintf(name_str, 10, "%d", uid);
+ p.pw_name=name_str;
+ p.pw_uid=uid;
+ p.pw_gecos="Test account";
+ p.pw_dir="/dev/null";
+ p.pw_shell="/bin/false";
+ int value = putpwent(&p,fd);
+ if (value != 0)
+ die("putpwent failed\n");
+ if (fclose(fd))
+ die("fclose failed\n");
+ }
+}
+
+static void ensure_securityfs_mounted(void)
+{
+ int fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ // Need to mount securityfs
+ if (mount("securityfs", "/sys/kernel/security",
+ "securityfs", 0, NULL) < 0)
+ die("mounting securityfs failed\n");
+ } else {
+ die("couldn't find securityfs for unknown reason\n");
+ }
+ } else {
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+}
+
+static void write_policies(void)
+{
+ ssize_t written;
+ int fd;
+
+ fd = open(add_whitelist_policy_file, O_WRONLY);
+ if (fd < 0)
+		die("can't open add_whitelist_policy file\n");
+ written = write(fd, "1:2", strlen("1:2"));
+ if (written != strlen("1:2")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ written = write(fd, "1:3", strlen("1:3"));
+ if (written != strlen("1:3")) {
+ if (written >= 0) {
+ die("short write to %s\n", add_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+ }
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_whitelist_policy_file, strerror(errno));
+ }
+}
+
+static bool test_userns(bool expect_success)
+{
+ uid_t uid;
+ char map_file_name[32];
+ size_t sz = sizeof(map_file_name);
+ pid_t cpid;
+ bool success;
+
+ uid = getuid();
+
+ int clone_flags = CLONE_NEWUSER;
+ cpid = syscall(SYS_clone, clone_flags, NULL);
+ if (cpid == -1) {
+ printf("clone failed");
+ return false;
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ // Give parent 1 second to write map file
+ sleep(1);
+ exit(EXIT_SUCCESS);
+ } else { /* Code executed by parent */
+ if(snprintf(map_file_name, sz, "/proc/%d/uid_map", cpid) < 0) {
+ printf("preparing file name string failed");
+ return false;
+ }
+ success = write_file(map_file_name, "0 0 1", uid);
+ return success == expect_success;
+ }
+
+ printf("should not reach here");
+ return false;
+}
+
+static void test_setuid(uid_t child_uid, bool expect_success)
+{
+ pid_t cpid, w;
+ int wstatus;
+
+ cpid = fork();
+ if (cpid == -1) {
+ die("fork\n");
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ setuid(child_uid);
+ if (getuid() == child_uid)
+ exit(EXIT_SUCCESS);
+ else
+ exit(EXIT_FAILURE);
+ } else { /* Code executed by parent */
+ do {
+ w = waitpid(cpid, &wstatus, WUNTRACED | WCONTINUED);
+ if (w == -1) {
+ die("waitpid\n");
+ }
+
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
+ if (expect_success) {
+ return;
+ } else {
+ die("unexpected success\n");
+ }
+ } else {
+ if (expect_success) {
+ die("unexpected failure\n");
+ } else {
+ return;
+ }
+ }
+ } else if (WIFSIGNALED(wstatus)) {
+ if (WTERMSIG(wstatus) == 9) {
+ if (expect_success)
+ die("killed unexpectedly\n");
+ else
+ return;
+ } else {
+ die("unexpected signal: %d\n", wstatus);
+ }
+ } else {
+ die("unexpected status: %d\n", wstatus);
+ }
+ } while (!WIFEXITED(wstatus) && !WIFSIGNALED(wstatus));
+ }
+
+ die("should not reach here\n");
+}
+
+static void ensure_users_exist(void)
+{
+ ensure_user_exists(ROOT_USER);
+ ensure_user_exists(RESTRICTED_PARENT);
+ ensure_user_exists(ALLOWED_CHILD1);
+ ensure_user_exists(ALLOWED_CHILD2);
+ ensure_user_exists(NO_POLICY_USER);
+}
+
+static void drop_caps(bool setid_retained)
+{
+ cap_value_t cap_values[] = {CAP_SETUID, CAP_SETGID};
+ cap_t caps;
+
+ caps = cap_get_proc();
+ if (setid_retained)
+ cap_set_flag(caps, CAP_EFFECTIVE, 2, cap_values, CAP_SET);
+ else
+ cap_clear(caps);
+ cap_set_proc(caps);
+ cap_free(caps);
+}
+
+int main(int argc, char **argv)
+{
+ ensure_users_exist();
+ ensure_securityfs_mounted();
+ write_policies();
+
+ if (prctl(PR_SET_KEEPCAPS, 1L))
+ die("Error with set keepcaps\n");
+
+ // First test to make sure we can write userns mappings from a user
+	// that doesn't have any restrictions (as long as it has CAP_SETUID).
+ setuid(NO_POLICY_USER);
+ setgid(NO_POLICY_USER);
+
+ // Take away all but setid caps
+ drop_caps(true);
+
+ // Need PR_SET_DUMPABLE flag set so we can write /proc/[pid]/uid_map
+ // from non-root parent process.
+ if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0))
+ die("Error with set dumpable\n");
+
+ if (!test_userns(true)) {
+ die("test_userns failed when it should work\n");
+ }
+
+ setuid(RESTRICTED_PARENT);
+ setgid(RESTRICTED_PARENT);
+
+ test_setuid(ROOT_USER, false);
+ test_setuid(ALLOWED_CHILD1, true);
+ test_setuid(ALLOWED_CHILD2, true);
+ test_setuid(NO_POLICY_USER, false);
+
+ if (!test_userns(false)) {
+ die("test_userns worked when it should fail\n");
+ }
+
+ // Now take away all caps
+ drop_caps(false);
+ test_setuid(2, false);
+ test_setuid(3, false);
+ test_setuid(4, false);
+
+ // NOTE: this test doesn't clean up users that were created in
+ // /etc/passwd or flush policies that were added to the LSM.
+ return EXIT_SUCCESS;
+}
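
The new safesetid-test.c exercises the SafeSetID LSM: write_policies()
whitelists the UID transitions "1:2" and "1:3" (parent 1 may setuid to
children 2 and 3), then the test drops to RESTRICTED_PARENT and verifies
that only the whitelisted setuid targets succeed. A minimal sketch of
adding one more policy entry from C; the "parent:child" string format is
taken from write_policies() above and the securityfs path is the one the
test already uses.

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Whitelist "parent may setuid to child" via SafeSetID's securityfs file. */
	static int add_setuid_policy(const char *pair)	/* e.g. "1:2" */
	{
		int fd = open("/sys/kernel/security/safesetid/add_whitelist_policy",
			      O_WRONLY);
		ssize_t written;

		if (fd < 0)
			return -1;
		written = write(fd, pair, strlen(pair));
		close(fd);
		return written == (ssize_t)strlen(pair) ? 0 : -1;
	}
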
diff --git a/tools/testing/selftests/safesetid/safesetid-test.sh b/tools/testing/selftests/safesetid/safesetid-test.sh
new file mode 100755
index 000000000000..e4fdce675c54
--- /dev/null
+++ b/tools/testing/selftests/safesetid/safesetid-test.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+TCID="safesetid-test.sh"
+errcode=0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+check_root()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo $TCID: must be run as root >&2
+ exit $ksft_skip
+ fi
+}
+
+main_function()
+{
+ check_root
+ ./safesetid-test
+}
+
+main_function
+echo "$TCID: done"
+exit $errcode
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 7e632b465ab4..dc66fe852768 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by the GPLv2 license.
*
* Test code for seccomp bpf.
*/
@@ -2166,11 +2166,14 @@ TEST(detect_seccomp_filter_flags)
SECCOMP_FILTER_FLAG_LOG,
SECCOMP_FILTER_FLAG_SPEC_ALLOW,
SECCOMP_FILTER_FLAG_NEW_LISTENER };
- unsigned int flag, all_flags;
+ unsigned int exclusive[] = {
+ SECCOMP_FILTER_FLAG_TSYNC,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER };
+ unsigned int flag, all_flags, exclusive_mask;
int i;
long ret;
- /* Test detection of known-good filter flags */
+ /* Test detection of individual known-good filter flags */
for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
int bits = 0;
@@ -2197,16 +2200,29 @@ TEST(detect_seccomp_filter_flags)
all_flags |= flag;
}
- /* Test detection of all known-good filter flags */
- ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
- EXPECT_EQ(-1, ret);
- EXPECT_EQ(EFAULT, errno) {
- TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
- all_flags);
+ /*
+ * Test detection of all known-good filter flags combined. But
+ * for the exclusive flags we need to mask them out and try them
+ * individually for the "all flags" testing.
+ */
+ exclusive_mask = 0;
+ for (i = 0; i < ARRAY_SIZE(exclusive); i++)
+ exclusive_mask |= exclusive[i];
+ for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
+ flag = all_flags & ~exclusive_mask;
+ flag |= exclusive[i];
+
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EFAULT, errno) {
+ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+ flag);
+ }
}
- /* Test detection of an unknown filter flag */
+	/* Test detection of unknown filter flags, without the exclusive ones. */
flag = -1;
+ flag &= ~exclusive_mask;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
@@ -2611,6 +2627,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
{
long ret, sib;
void *status;
+ struct timespec delay = { .tv_nsec = 100000000 };
ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
@@ -2664,7 +2681,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
- sleep(0.1);
+ nanosleep(&delay, NULL);
/* Switch to the remaining sibling */
sib = !sib;
@@ -2689,7 +2706,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
EXPECT_EQ(0, (long)status);
/* Poll for actual task death. pthread_join doesn't guarantee it. */
while (!kill(self->sibling[sib].system_tid, 0))
- sleep(0.1);
+ nanosleep(&delay, NULL);
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
@@ -2971,6 +2988,12 @@ TEST(get_metadata)
struct seccomp_metadata md;
long ret;
+ /* Only real root can get metadata. */
+ if (geteuid()) {
+ XFAIL(return, "get_metadata requires real root");
+ return;
+ }
+
ASSERT_EQ(0, pipe(pipefd));
pid = fork();
@@ -2985,11 +3008,11 @@ TEST(get_metadata)
};
/* one with log, one without */
- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
+ EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
SECCOMP_FILTER_FLAG_LOG, &prog));
- ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
+ EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
- ASSERT_EQ(0, close(pipefd[0]));
+ EXPECT_EQ(0, close(pipefd[0]));
ASSERT_EQ(1, write(pipefd[1], "1", 1));
ASSERT_EQ(0, close(pipefd[1]));
@@ -3062,14 +3085,19 @@ TEST(user_notification_basic)
.filter = filter,
};
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
pid = fork();
ASSERT_GE(pid, 0);
/* Check that we get -ENOSYS with no listener attached */
if (pid == 0) {
- if (user_trap_syscall(__NR_getpid, 0) < 0)
+ if (user_trap_syscall(__NR_getppid, 0) < 0)
exit(1);
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret >= 0 || errno != ENOSYS);
}
@@ -3077,19 +3105,19 @@ TEST(user_notification_basic)
EXPECT_EQ(true, WIFEXITED(status));
EXPECT_EQ(0, WEXITSTATUS(status));
- /* Add some no-op filters so for grins. */
+ /* Add some no-op filters for grins. */
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
/* Check that the basic notification machinery works */
- listener = user_trap_syscall(__NR_getpid,
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
/* Installing a second listener in the chain should EBUSY */
- EXPECT_EQ(user_trap_syscall(__NR_getpid,
+ EXPECT_EQ(user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER),
-1);
EXPECT_EQ(errno, EBUSY);
@@ -3098,7 +3126,7 @@ TEST(user_notification_basic)
ASSERT_GE(pid, 0);
if (pid == 0) {
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
@@ -3116,7 +3144,7 @@ TEST(user_notification_basic)
EXPECT_GT(poll(&pollfd, 1, -1), 0);
EXPECT_EQ(pollfd.revents, POLLOUT);
- EXPECT_EQ(req.data.nr, __NR_getpid);
+ EXPECT_EQ(req.data.nr, __NR_getppid);
resp.id = req.id;
resp.error = 0;
@@ -3143,7 +3171,12 @@ TEST(user_notification_kill_in_middle)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
- listener = user_trap_syscall(__NR_getpid,
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3155,7 +3188,7 @@ TEST(user_notification_kill_in_middle)
ASSERT_GE(pid, 0);
if (pid == 0) {
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != USER_NOTIF_MAGIC);
}
@@ -3190,6 +3223,11 @@ TEST(user_notification_signal)
struct seccomp_notif_resp resp = {};
char c;
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);
listener = user_trap_syscall(__NR_gettid,
@@ -3255,7 +3293,12 @@ TEST(user_notification_closed_listener)
long ret;
int status, listener;
- listener = user_trap_syscall(__NR_getpid,
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_trap_syscall(__NR_getppid,
SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
@@ -3266,7 +3309,7 @@ TEST(user_notification_closed_listener)
ASSERT_GE(pid, 0);
if (pid == 0) {
close(listener);
- ret = syscall(__NR_getpid);
+ ret = syscall(__NR_getppid);
exit(ret != -1 && errno != ENOSYS);
}
@@ -3287,16 +3330,17 @@ TEST(user_notification_child_pid_ns)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
- ASSERT_EQ(unshare(CLONE_NEWPID), 0);
+ ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0);
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
EXPECT_EQ(req.pid, pid);
@@ -3324,7 +3368,12 @@ TEST(user_notification_sibling_pid_ns)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
@@ -3337,7 +3386,7 @@ TEST(user_notification_sibling_pid_ns)
ASSERT_GE(pid2, 0);
if (pid2 == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
EXPECT_EQ(true, WIFEXITED(status));
@@ -3346,11 +3395,11 @@ TEST(user_notification_sibling_pid_ns)
}
/* Create the sibling ns, and sibling in it. */
- EXPECT_EQ(unshare(CLONE_NEWPID), 0);
- EXPECT_EQ(errno, 0);
+ ASSERT_EQ(unshare(CLONE_NEWPID), 0);
+ ASSERT_EQ(errno, 0);
pid2 = fork();
- EXPECT_GE(pid2, 0);
+ ASSERT_GE(pid2, 0);
if (pid2 == 0) {
ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
@@ -3358,7 +3407,7 @@ TEST(user_notification_sibling_pid_ns)
* The pid should be 0, i.e. the task is in some namespace that
* we can't "see".
*/
- ASSERT_EQ(req.pid, 0);
+ EXPECT_EQ(req.pid, 0);
resp.id = req.id;
resp.error = 0;
@@ -3386,14 +3435,17 @@ TEST(user_notification_fault_recv)
struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};
- listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
+
+ listener = user_trap_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
ASSERT_GE(listener, 0);
pid = fork();
ASSERT_GE(pid, 0);
if (pid == 0)
- exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC);
+ exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
/* Do a bad recv() */
EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
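
The seccomp selftest changes above switch the user-notification tests from
getpid to getppid (so the harness itself can keep using getpid), add the
missing PR_SET_NO_NEW_PRIVS calls, and teach detect_seccomp_filter_flags
that TSYNC and NEW_LISTENER are mutually exclusive and must be tried
separately. A minimal sketch of the filter-installation pattern these tests
rely on is shown below; it assumes a kernel that provides
SECCOMP_RET_USER_NOTIF and SECCOMP_FILTER_FLAG_NEW_LISTENER and returns the
listener fd on success.

	#include <stddef.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>

	/* Trap getppid() with a user-space notification listener. */
	static int install_getppid_listener(void)
	{
		struct sock_filter filter[] = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getppid, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_USER_NOTIF),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(filter) / sizeof(filter[0]),
			.filter = filter,
		};

		/* Required when installing a filter without CAP_SYS_ADMIN. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
			return -1;
		return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
			       SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
	}
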
diff --git a/tools/testing/selftests/sigaltstack/Makefile b/tools/testing/selftests/sigaltstack/Makefile
index f68fbf80d8be..3e96d5d47036 100644
--- a/tools/testing/selftests/sigaltstack/Makefile
+++ b/tools/testing/selftests/sigaltstack/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS = -Wall
TEST_GEN_PROGS = sas
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index 228c2ae47687..ad0f8df2ca0a 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -109,6 +109,7 @@ int main(void)
int err;
ksft_print_header();
+ ksft_set_plan(3);
sigemptyset(&act.sa_mask);
act.sa_flags = SA_ONSTACK | SA_SIGINFO;
diff --git a/tools/testing/selftests/size/Makefile b/tools/testing/selftests/size/Makefile
index 4685b3e421fc..b87facc00a6e 100644
--- a/tools/testing/selftests/size/Makefile
+++ b/tools/testing/selftests/size/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CFLAGS := -static -ffreestanding -nostartfiles -s
TEST_GEN_PROGS := get_size
diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
index d4b59ab979a0..2ad45b944355 100644
--- a/tools/testing/selftests/size/get_size.c
+++ b/tools/testing/selftests/size/get_size.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2014 Sony Mobile Communications Inc.
*
- * Licensed under the terms of the GNU GPL License version 2
- *
* Selftest for runtime system size
*
* Prints the amount of RAM that the currently running system is using.
diff --git a/tools/testing/selftests/static_keys/Makefile b/tools/testing/selftests/static_keys/Makefile
index 9cdadf37f114..aa64104c7860 100644
--- a/tools/testing/selftests/static_keys/Makefile
+++ b/tools/testing/selftests/static_keys/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for static keys selftests
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c
index 7f7938263c5c..3824b66f41a0 100644
--- a/tools/testing/selftests/sync/sync_test.c
+++ b/tools/testing/selftests/sync/sync_test.c
@@ -86,6 +86,7 @@ int main(void)
int err;
ksft_print_header();
+ ksft_set_plan(3 + 7);
sync_api_supported();
diff --git a/tools/testing/selftests/sysctl/Makefile b/tools/testing/selftests/sysctl/Makefile
index 95c320b354e8..110301f9f5be 100644
--- a/tools/testing/selftests/sysctl/Makefile
+++ b/tools/testing/selftests/sysctl/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for sysctl selftests.
# Expects kernel.sysctl_writes_strict=1.
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 584eb8ea780a..6a970b127c9b 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -24,19 +24,21 @@ TEST_FILE=$(mktemp)
# This represents
#
-# TEST_ID:TEST_COUNT:ENABLED
+# TEST_ID:TEST_COUNT:ENABLED:TARGET
#
# TEST_ID: is the test id number
# TEST_COUNT: number of times we should run the test
# ENABLED: 1 if enabled, 0 otherwise
+# TARGET: test target file required on the test_sysctl module
#
# Once these are enabled please leave them as-is. Write your own test,
# we have tons of space.
-ALL_TESTS="0001:1:1"
-ALL_TESTS="$ALL_TESTS 0002:1:1"
-ALL_TESTS="$ALL_TESTS 0003:1:1"
-ALL_TESTS="$ALL_TESTS 0004:1:1"
-ALL_TESTS="$ALL_TESTS 0005:3:1"
+ALL_TESTS="0001:1:1:int_0001"
+ALL_TESTS="$ALL_TESTS 0002:1:1:string_0001"
+ALL_TESTS="$ALL_TESTS 0003:1:1:int_0002"
+ALL_TESTS="$ALL_TESTS 0004:1:1:uint_0001"
+ALL_TESTS="$ALL_TESTS 0005:3:1:int_0003"
+ALL_TESTS="$ALL_TESTS 0006:50:1:bitmap_0001"
test_modprobe()
{
@@ -149,6 +151,9 @@ reset_vals()
string_0001)
VAL="(none)"
;;
+ bitmap_0001)
+ VAL=""
+ ;;
*)
;;
esac
@@ -157,8 +162,10 @@ reset_vals()
set_orig()
{
- if [ ! -z $TARGET ]; then
- echo "${ORIG}" > "${TARGET}"
+ if [ ! -z $TARGET ] && [ ! -z $ORIG ]; then
+ if [ -f ${TARGET} ]; then
+ echo "${ORIG}" > "${TARGET}"
+ fi
fi
}
@@ -177,9 +184,25 @@ verify()
return 0
}
+# proc files get read a page at a time, which can confuse diff,
+# and get you incorrect results on proc files with long data. To use
+# diff against them you must first extract the output to a file, and
+# then compare against that file.
+verify_diff_proc_file()
+{
+ TMP_DUMP_FILE=$(mktemp)
+ cat $1 > $TMP_DUMP_FILE
+
+ if ! diff -w -q $TMP_DUMP_FILE $2; then
+ return 1
+ else
+ return 0
+ fi
+}
+
verify_diff_w()
{
- echo "$TEST_STR" | diff -q -w -u - $1
+ echo "$TEST_STR" | diff -q -w -u - $1 > /dev/null
return $?
}
@@ -290,6 +313,58 @@ run_numerictests()
test_rc
}
+check_failure()
+{
+ echo -n "Testing that $1 fails as expected..."
+ reset_vals
+ TEST_STR="$1"
+ orig="$(cat $TARGET)"
+ echo -n "$TEST_STR" > $TARGET 2> /dev/null
+
+ # write should fail and $TARGET should retain its original value
+ if [ $? = 0 ] || [ "$(cat $TARGET)" != "$orig" ]; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ fi
+ test_rc
+}
+
+run_wideint_tests()
+{
+ # sysctl conversion functions receive a boolean sign and ulong
+ # magnitude; here we list the magnitudes we want to test (each of
+ # which will be tested in both positive and negative forms). Since
+ # none of these values fit in 32 bits, writing them to an int- or
+ # uint-typed sysctl should fail.
+ local magnitudes=(
+ # common boundary-condition values (zero, +1, -1, INT_MIN,
+ # and INT_MAX respectively) if truncated to lower 32 bits
+ # (potential for being falsely deemed in range)
+ 0x0000000100000000
+ 0x0000000100000001
+ 0x00000001ffffffff
+ 0x0000000180000000
+ 0x000000017fffffff
+
+ # these look like negatives, but without a leading '-' are
+ # actually large positives (should be rejected as above
+ # despite being zero/+1/-1/INT_MIN/INT_MAX in the lower 32)
+ 0xffffffff00000000
+ 0xffffffff00000001
+ 0xffffffffffffffff
+ 0xffffffff80000000
+ 0xffffffff7fffffff
+ )
+
+ for sign in '' '-'; do
+ for mag in "${magnitudes[@]}"; do
+ check_failure "${sign}${mag}"
+ done
+ done
+}
+
# Your test must accept digits 3 and 4 to use this
run_limit_digit()
{
@@ -548,20 +623,82 @@ run_stringtests()
test_rc
}
+target_exists()
+{
+ TARGET="${SYSCTL}/$1"
+ TEST_ID="$2"
+
+ if [ ! -f ${TARGET} ] ; then
+		echo "Target for test $TEST_ID: $TARGET does not exist, skipping test ..."
+ return 0
+ fi
+ return 1
+}
+
+run_bitmaptest() {
+ # Total length of bitmaps string to use, a bit under
+	# Total length of the bitmap string to use, a bit under
+	# the maximum input size of the test node
+
+ # First bit to set
+ BIT=$((RANDOM % 1024))
+
+ # String containing our list of bits to set
+ TEST_STR=$BIT
+
+ # build up the string
+ while [ "${#TEST_STR}" -le "$LENGTH" ]; do
+ # Make sure next entry is discontiguous,
+ # skip ahead at least 2
+ BIT=$((BIT + $((2 + RANDOM % 10))))
+
+ # Add new bit to the list
+ TEST_STR="${TEST_STR},${BIT}"
+
+ # Randomly make it a range
+ if [ "$((RANDOM % 2))" -eq "1" ]; then
+ RANGE_END=$((BIT + $((1 + RANDOM % 10))))
+ TEST_STR="${TEST_STR}-${RANGE_END}"
+ BIT=$RANGE_END
+ fi
+ done
+
+ echo -n "Checking bitmap handler... "
+ TEST_FILE=$(mktemp)
+ echo -n "$TEST_STR" > $TEST_FILE
+
+ cat $TEST_FILE > $TARGET 2> /dev/null
+ if [ $? -ne 0 ]; then
+ echo "FAIL" >&2
+ rc=1
+ test_rc
+ fi
+
+ if ! verify_diff_proc_file "$TARGET" "$TEST_FILE"; then
+ echo "FAIL" >&2
+ rc=1
+ else
+ echo "ok"
+ rc=0
+ fi
+ test_rc
+}
+
sysctl_test_0001()
{
- TARGET="${SYSCTL}/int_0001"
+ TARGET="${SYSCTL}/$(get_test_target 0001)"
reset_vals
ORIG=$(cat "${TARGET}")
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
}
sysctl_test_0002()
{
- TARGET="${SYSCTL}/string_0001"
+ TARGET="${SYSCTL}/$(get_test_target 0002)"
reset_vals
ORIG=$(cat "${TARGET}")
TEST_STR="Testing sysctl"
@@ -574,37 +711,47 @@ sysctl_test_0002()
sysctl_test_0003()
{
- TARGET="${SYSCTL}/int_0002"
+ TARGET="${SYSCTL}/$(get_test_target 0003)"
reset_vals
ORIG=$(cat "${TARGET}")
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
run_limit_digit_int
}
sysctl_test_0004()
{
- TARGET="${SYSCTL}/uint_0001"
+ TARGET="${SYSCTL}/$(get_test_target 0004)"
reset_vals
ORIG=$(cat "${TARGET}")
TEST_STR=$(( $ORIG + 1 ))
run_numerictests
+ run_wideint_tests
run_limit_digit
run_limit_digit_uint
}
sysctl_test_0005()
{
- TARGET="${SYSCTL}/int_0003"
+ TARGET="${SYSCTL}/$(get_test_target 0005)"
reset_vals
ORIG=$(cat "${TARGET}")
run_limit_digit_int_array
}
+sysctl_test_0006()
+{
+ TARGET="${SYSCTL}/bitmap_0001"
+ reset_vals
+ ORIG=""
+ run_bitmaptest
+}
+
list_tests()
{
echo "Test ID list:"
@@ -618,10 +765,9 @@ list_tests()
echo "0003 x $(get_test_count 0003) - tests proc_dointvec()"
echo "0004 x $(get_test_count 0004) - tests proc_douintvec()"
echo "0005 x $(get_test_count 0005) - tests proc_douintvec() array"
+ echo "0006 x $(get_test_count 0006) - tests proc_do_large_bitmap()"
}
-test_reqs
-
usage()
{
NUM_TESTS=$(grep -o ' ' <<<"$ALL_TESTS" | grep -c .)
@@ -669,25 +815,35 @@ function get_test_count()
{
test_num $1
TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
- LAST_TWO=${TEST_DATA#*:*}
- echo ${LAST_TWO%:*}
+ echo ${TEST_DATA} | awk -F":" '{print $2}'
}
function get_test_enabled()
{
test_num $1
TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
- echo ${TEST_DATA#*:*:}
+ echo ${TEST_DATA} | awk -F":" '{print $3}'
+}
+
+function get_test_target()
+{
+ test_num $1
+ TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
+ echo ${TEST_DATA} | awk -F":" '{print $4}'
}
function run_all_tests()
{
for i in $ALL_TESTS ; do
- TEST_ID=${i%:*:*}
+ TEST_ID=${i%:*:*:*}
ENABLED=$(get_test_enabled $TEST_ID)
TEST_COUNT=$(get_test_count $TEST_ID)
+ TEST_TARGET=$(get_test_target $TEST_ID)
+ if target_exists $TEST_TARGET $TEST_ID; then
+ continue
+ fi
if [[ $ENABLED -eq "1" ]]; then
- test_case $TEST_ID $TEST_COUNT
+ test_case $TEST_ID $TEST_COUNT $TEST_TARGET
fi
done
}
@@ -720,12 +876,14 @@ function watch_case()
function test_case()
{
- NUM_TESTS=$DEFAULT_NUM_TESTS
- if [ $# -eq 2 ]; then
- NUM_TESTS=$2
- fi
+ NUM_TESTS=$2
i=0
+
+ if target_exists $3 $1; then
+ continue
+ fi
+
while [ $i -lt $NUM_TESTS ]; do
test_num $1
watch_log $i ${TEST_NAME}_test_$1 noclear
@@ -748,15 +906,15 @@ function parse_args()
elif [[ "$1" = "-t" ]]; then
shift
test_num $1
- test_case $1 $(get_test_count $1)
+ test_case $1 $(get_test_count $1) $(get_test_target $1)
elif [[ "$1" = "-c" ]]; then
shift
test_num $1
test_num $2
- test_case $1 $2
+ test_case $1 $2 $(get_test_target $1)
elif [[ "$1" = "-s" ]]; then
shift
- test_case $1 1
+ test_case $1 1 $(get_test_target $1)
elif [[ "$1" = "-l" ]]; then
list_tests
elif [[ "$1" = "-h" || "$1" = "--help" ]]; then
@@ -770,8 +928,8 @@ function parse_args()
test_reqs
allow_user_defaults
check_production_sysctl_writes_strict
-test_modprobe
load_req_mod
+test_modprobe
trap "test_finish" EXIT
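
sysctl.sh now carries a fourth ALL_TESTS field naming the target file,
skips tests whose target the test_sysctl module does not provide, and adds
both the wide-integer rejection tests and a large-bitmap test. The comment
in run_wideint_tests explains why those particular magnitudes were picked:
each one exceeds 32 bits but truncates to an innocuous-looking 32-bit
value, so a conversion that silently dropped the upper half would wrongly
accept it. A tiny C illustration of that truncation hazard (value taken
from the list above; result shown for the usual two's-complement targets):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t wide = 0x0000000100000001ULL;	/* > UINT32_MAX */
		int32_t truncated = (int32_t)wide;	/* looks like plain "1" */

		printf("wide=%#llx truncated=%d\n",
		       (unsigned long long)wide, truncated);
		return 0;
	}
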
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index c5cc160948b3..c26d72e0166f 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -3,3 +3,4 @@ __pycache__/
plugins/
*.xml
*.tap
+tdc_config_local.py
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 1d9e279331eb..b980a565fa89 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -18,13 +18,13 @@ class TdcPlugin:
if self.args.verbose > 1:
print(' -- {}.post_suite'.format(self.sub_class))
- def pre_case(self, test_ordinal, testid, test_name):
+ def pre_case(self, testid, test_name, test_skip):
'''run commands before test_runner does one test'''
if self.args.verbose > 1:
print(' -- {}.pre_case'.format(self.sub_class))
self.args.testid = testid
self.args.test_name = test_name
- self.args.test_ordinal = test_ordinal
+ self.args.test_skip = test_skip
def post_case(self):
'''run commands after test_runner does one test'''
diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
index 17b267dedbd9..a28571aff0e1 100644
--- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
+++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt
@@ -33,6 +33,11 @@ Each test case has required data:
id: A unique alphanumeric value to identify a particular test case
name: Descriptive name that explains the command under test
+skip: An optional key. If the corresponding value is "yes", tdc will not
+      execute the test case in question; it will still appear in the
+      results output, but marked as skipped. This key can be placed
+      anywhere inside the test case at the top level.
category: A list of single-word descriptions covering what the command
under test is testing. Example: filter, actions, u32, gact, etc.
setup: The list of commands required to ensure the command under test
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index e00c798de0bb..4bb866575ea1 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -102,6 +102,14 @@ class SubPlugin(TdcPlugin):
if not self.args.valgrind:
return
+ res = TestResult('{}-mem'.format(self.args.testid),
+ '{} memory leak check'.format(self.args.test_name))
+ if self.args.test_skip:
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ self._add_results(res)
+ return
+
self.definitely_lost_re = re.compile(
r'definitely lost:\s+([,0-9]+)\s+bytes in\s+([,0-9]+)\sblocks', re.MULTILINE | re.DOTALL)
self.indirectly_lost_re = re.compile(
@@ -134,8 +142,6 @@ class SubPlugin(TdcPlugin):
nle_num = int(nle_mo.group(1))
mem_results = ''
- res = TestResult('{}-mem'.format(self.args.testid),
- '{} memory leak check'.format(self.args.test_name))
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
res.set_result(ResultState.fail)
@@ -146,12 +152,6 @@ class SubPlugin(TdcPlugin):
self._add_results(res)
- mem_results += 'ok {} - {}-mem # {}\n'.format(
- self.args.test_ordinal, self.args.testid, 'memory leak check')
- self._add_to_tap(mem_results)
- if mem_results.startswith('not '):
- print('{}'.format(content))
- self._add_to_tap(content)
def _add_results(self, res):
self._tsr.add_resultdata(res)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 5970cee6d05f..b074ea9b6fe8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -286,5 +286,30 @@
"teardown": [
"$TC action flush action bpf"
]
+ },
+ {
+ "id": "b8a1",
+ "name": "Replace bpf action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "bpf"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action bpf",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
+ ],
+ "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC action list action bpf",
+ "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
+ "matchCount": "1",
+ "teardown": [
+ "$TC action flush action bpf"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 13147a1f5731..cadde8f41fcd 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -287,5 +287,30 @@
"teardown": [
"$TC actions flush action connmark"
]
+ },
+ {
+ "id": "c506",
+ "name": "Replace connmark with invalid goto chain control",
+ "category": [
+ "actions",
+ "connmark"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action connmark",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action connmark pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action connmark index 90",
+ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action connmark"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index a022792d392a..ddabb2fbb7c7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -500,5 +500,30 @@
"matchPattern": "^[ \t]+index [0-9]+ ref",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "d128",
+ "name": "Replace csum action with invalid goto chain control",
+ "category": [
+ "actions",
+ "csum"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action csum",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action csum iph index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action csum index 90",
+ "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action csum"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 89189a03ce3d..814b7a8a478b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -560,5 +560,30 @@
"teardown": [
"$TC actions flush action gact"
]
+ },
+ {
+ "id": "ca89",
+ "name": "Replace gact action with invalid goto chain control",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pass random determ drop 2 index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action gact"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 0da3545cabdb..c13a68b98fc7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -1060,5 +1060,30 @@
"matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "a0e2",
+ "name": "Replace ife encode action with invalid goto chain control",
+ "category": [
+ "actions",
+ "ife"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action ife",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action ife encode allow mark pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action ife index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action ife"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index db49fd0f8445..6e5fb3d25681 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -434,5 +434,30 @@
"teardown": [
"$TC actions flush action mirred"
]
+ },
+ {
+ "id": "2a9a",
+ "name": "Replace mirred action with invalid goto chain control",
+ "category": [
+ "actions",
+ "mirred"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action mirred",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action mirred ingress mirror dev lo drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action mirred index 90",
+ "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action mirred"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
index 0080dc2fd41c..bc12c1ccad30 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
@@ -589,5 +589,30 @@
"teardown": [
"$TC actions flush action nat"
]
+ },
+ {
+ "id": "4b12",
+ "name": "Replace nat action with invalid goto chain control",
+ "category": [
+ "actions",
+ "nat"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action nat",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action nat index 90",
+ "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action nat"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644
index 000000000000..0d319f1d01db
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -0,0 +1,954 @@
+[
+ {
+ "id": "319a",
+ "name": "Add pedit action that mangles IP TTL",
+ "category": [
+ "actions",
+ "pedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 1 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "7e67",
+ "name": "Replace pedit action with invalid goto chain",
+ "category": [
+ "actions",
+ "pedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action pedit",
+ "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 90 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "377e",
+ "name": "Add pedit action with RAW_OP offset u32",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u32 set 0x90abcdef",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "12: val 90abcdef mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "a0ca",
+ "name": "Add pedit action with RAW_OP offset u32 (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 2 u32 set 0x12345678",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "dd8a",
+ "name": "Add pedit action with RAW_OP offset u16 u16",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u16 set 0x1234 munge offset 14 u16 set 0x5678",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "val 12340000 mask 0000ffff.*val 00005678 mask ffff0000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "53db",
+ "name": "Add pedit action with RAW_OP offset u16 (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 15 u16 set 0x1234",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "5c7e",
+ "name": "Add pedit action with RAW_OP offset u8 add value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge offset 16 u8 add 0xf",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 16: add 0f000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "2893",
+ "name": "Add pedit action with RAW_OP offset u8 quad",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u8 set 0x12 munge offset 13 u8 set 0x34 munge offset 14 u8 set 0x56 munge offset 15 u8 set 0x78",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "val 12000000 mask 00ffffff.*val 00340000 mask ff00ffff.*val 00005600 mask ffff00ff.*val 00000078 mask ffffff00",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "3a07",
+ "name": "Add pedit action with RAW_OP offset u8-u16-u8",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u8 set 0x12 munge offset 1 u16 set 0x3456 munge offset 3 u8 set 0x78",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "val 12000000 mask 00ffffff.*val 00345600 mask ff0000ff.*val 00000078 mask ffffff00",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "ab0f",
+ "name": "Add pedit action with RAW_OP offset u16-u8-u8",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u16 set 0x1234 munge offset 2 u8 set 0x56 munge offset 3 u8 set 0x78",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "val 12340000 mask 0000ffff.*val 00005600 mask ffff00ff.*val 00000078 mask ffffff00",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "9d12",
+ "name": "Add pedit action with RAW_OP offset u32 set u16 clear u8 invert",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0 u32 set 0x12345678 munge offset 1 u16 clear munge offset 2 u8 invert",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "val 12345678 mask 00000000.*val 00000000 mask ff0000ff.*val 0000ff00 mask ffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "ebfa",
+ "name": "Add pedit action with RAW_OP offset overflow u32 (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 0xffffffffffffffffffffffffffffffffffffffffff u32 set 0x1",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "f512",
+ "name": "Add pedit action with RAW_OP offset u16 at offmask shift set",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u16 at 12 ffff 1 set 0xaaaa",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 12: val aaaa0000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c2cb",
+ "name": "Add pedit action with RAW_OP offset u32 retain value",
+ "category": [
+ "actions",
+ "pedit",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge offset 12 u32 set 0x12345678 retain 0xff00",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 12: val 00005600 mask ffff00ff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "86d4",
+ "name": "Add pedit action with LAYERED_OP eth set src & dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src set 11:22:33:44:55:66 munge eth dst set ff:ee:dd:cc:bb:aa",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "eth\\+4: val 00001122 mask ffff0000.*eth\\+8: val 33445566 mask 00000000.*eth\\+0: val ffeeddcc mask 00000000.*eth\\+4: val bbaa0000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "c715",
+ "name": "Add pedit action with LAYERED_OP eth set src (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src set %e:11:m2:33:x4:-5",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "ba22",
+ "name": "Add pedit action with LAYERED_OP eth type set/clear sequence",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth type set 0x1 munge eth type clear munge eth type set 0x1 munge eth type clear munge eth type set 0x1 munge eth type clear munge eth type set 0x1 munge eth type clear munge eth type set 0x1 munge eth type clear munge eth type set 0x1 munge eth type clear",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff.*eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff.*eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff.*eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff.*eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff.*eth\\+12: val 00010000 mask 0000ffff.*eth\\+12: val 00000000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "5810",
+ "name": "Add pedit action with LAYERED_OP ip set src & dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip src set 18.52.86.120 munge ip dst set 18.52.86.120",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 12: val 12345678 mask 00000000.* 16: val 12345678 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "1092",
+ "name": "Add pedit action with LAYERED_OP ip set ihl & dsfield",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ihl set 0xff munge ip dsfield set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 0: val 0f000000 mask f0ffffff.* 0: val 00ff0000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "02d8",
+ "name": "Add pedit action with LAYERED_OP ip set ttl & protocol",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ttl set 0x1 munge ip protocol set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 8: val 01000000 mask 00ffffff.* 8: val 00ff0000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "3e2d",
+ "name": "Add pedit action with LAYERED_OP ip set ttl (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ttl set 300",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "31ae",
+ "name": "Add pedit action with LAYERED_OP ip ttl clear/set",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ttl clear munge ip ttl set 0x1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 8: val 00000000 mask 00ffffff.* 8: val 01000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "486f",
+ "name": "Add pedit action with LAYERED_OP ip set duplicate fields",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ttl set 0x1 munge ip ttl set 0x1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 8: val 01000000 mask 00ffffff.* 8: val 01000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "e790",
+ "name": "Add pedit action with LAYERED_OP ip set ce, df, mf, firstfrag, nofrag fields",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip ce set 0xff munge ip df set 0xff munge ip mf set 0xff munge ip firstfrag set 0xff munge ip nofrag set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 4: val 00008000 mask ffff7fff.* 4: val 00004000 mask ffffbfff.* 4: val 00002000 mask ffffdfff.* 4: val 00001f00 mask ffffe0ff.* 4: val 00003f00 mask ffffc0ff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "6829",
+ "name": "Add pedit action with LAYERED_OP beyond ip set dport & sport",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip dport set 0x1234 munge ip sport set 0x5678",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 20: val 00001234 mask ffff0000.* 20: val 56780000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "afd8",
+ "name": "Add pedit action with LAYERED_OP beyond ip set icmp_type & icmp_code",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit munge ip icmp_type set 0xff munge ip icmp_code set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": " 20: val ff000000 mask 00ffffff.* 20: val ff000000 mask 00ffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "3143",
+ "name": "Add pedit action with LAYERED_OP beyond ip set dport (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip dport set 0x1234",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "fc1f",
+ "name": "Add pedit action with LAYERED_OP ip6 set src & dst",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 src set 2001:0db8:0:f101::1 munge ip6 dst set 2001:0db8:0:f101::1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "ipv6\\+8: val 20010db8 mask 00000000.*ipv6\\+12: val 0000f101 mask 00000000.*ipv6\\+16: val 00000000 mask 00000000.*ipv6\\+20: val 00000001 mask 00000000.*ipv6\\+24: val 20010db8 mask 00000000.*ipv6\\+28: val 0000f101 mask 00000000.*ipv6\\+32: val 00000000 mask 00000000.*ipv6\\+36: val 00000001 mask 00000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "6d34",
+ "name": "Add pedit action with LAYERED_OP ip6 dst retain value (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 dst set 2001:0db8:0:f101::1 retain 0xff0000",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "6f5e",
+ "name": "Add pedit action with LAYERED_OP ip6 flow_lbl",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 flow_lbl set 0xfffff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "ipv6\\+0: val 0007ffff mask fff80000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "6795",
+ "name": "Add pedit action with LAYERED_OP ip6 set payload_len, nexthdr, hoplimit",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge ip6 payload_len set 0xffff munge ip6 nexthdr set 0xff munge ip6 hoplimit set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "ipv6\\+4: val ffff0000 mask 0000ffff.*ipv6\\+4: val 0000ff00 mask ffff00ff.*ipv6\\+4: val 000000ff mask ffffff00",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "1442",
+ "name": "Add pedit action with LAYERED_OP tcp set dport & sport",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge tcp dport set 4789 munge tcp sport set 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "tcp\\+0: val 000012b5 mask ffff0000.*tcp\\+0: val 00010000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "b7ac",
+ "name": "Add pedit action with LAYERED_OP tcp sport set (INVALID)",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge tcp sport set -200",
+ "expExitCode": "255",
+ "verifyCmd": "/bin/true",
+ "matchPattern": " ",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "cfcc",
+ "name": "Add pedit action with LAYERED_OP tcp flags set",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge tcp flags set 0x16",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "tcp\\+12: val 00160000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "3bc4",
+ "name": "Add pedit action with LAYERED_OP tcp set dport, sport & flags fields",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge tcp dport set 4789 munge tcp sport set 1 munge tcp flags set 0x1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "tcp\\+0: val 000012b5 mask ffff0000.*tcp\\+0: val 00010000 mask 0000ffff.*tcp\\+12: val 00010000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "f1c8",
+ "name": "Add pedit action with LAYERED_OP udp set dport & sport",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge udp dport set 4789 munge udp sport set 4789",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "udp\\+0: val 000012b5 mask ffff0000.*udp\\+0: val 12b50000 mask 0000ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "d784",
+ "name": "Add pedit action with mixed RAW/LAYERED_OP #1",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src set 11:22:33:44:55:66 munge ip ttl set 0xff munge tcp flags clear munge offset 15 u8 add 40 retain 0xf0 munge udp dport add 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "eth\\+4: val 00001122 mask ffff0000.*eth\\+8: val 33445566 mask 00000000.*ipv4\\+8: val ff000000 mask 00ffffff.*tcp\\+12: val 00000000 mask ff00ffff.* 12: add 00000020 mask ffffff0f.*udp\\+0: add 00000001 mask ffff0000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ },
+ {
+ "id": "70ca",
+ "name": "Add pedit action with mixed RAW/LAYERED_OP #2",
+ "category": [
+ "actions",
+ "pedit",
+ "layered_op",
+ "raw_op"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action pedit",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action pedit ex munge eth src set 11:22:33:44:55:66 munge eth dst set ff:ee:dd:cc:bb:aa munge ip6 payload_len set 0xffff munge ip6 nexthdr set 0xff munge ip6 hoplimit preserve munge offset 0 u8 set 0x12 munge offset 1 u16 set 0x3456 munge offset 3 u8 set 0x78 munge ip ttl set 0xaa munge ip protocol set 0xff",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action pedit | grep 'key '",
+ "matchPattern": "eth\\+4: val 00001122 mask ffff0000.*eth\\+8: val 33445566 mask 00000000.*eth\\+0: val ffeeddcc mask 00000000.*eth\\+4: val bbaa0000 mask 0000ffff.*ipv6\\+4: val ffff0000 mask 0000ffff.*ipv6\\+4: val 0000ff00 mask ffff00ff.*ipv6\\+4: val 00000000 mask ffffffff.* 0: val 12000000 mask 00ffffff.* 0: val 00345600 mask ff0000ff.* 0: val 00000078 mask ffffff00.*ipv4\\+8: val aa000000 mask 00ffffff.*ipv4\\+8: val 00ff0000 mask ff00ffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action pedit"
+ ]
+ }
+
+]
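The matchPattern strings in these pedit cases reflect how pedit stores each key: a 32-bit, word-aligned chunk where 'val' carries the bytes being written (in network byte order) and 'mask' has 0xff in the bytes left untouched. A small sketch, my own illustration rather than anything from the patch, reproducing the val/mask pairs for the simple RAW_OP 'set' cases that stay within one 32-bit word:

    def pedit_val_mask(offset, width_bytes, value):
        # pedit operates on the 32-bit word containing the field
        word_off = offset & ~3
        shift = (4 - (offset % 4) - width_bytes) * 8   # big-endian placement
        val = (value << shift) & 0xffffffff
        mask = 0xffffffff ^ (((1 << (width_bytes * 8)) - 1) << shift)
        return word_off, val, mask

    # 'munge offset 12 u16 set 0x1234' -> (12, 0x12340000, 0x0000ffff)
    print(pedit_val_mask(12, 2, 0x1234))
    # 'munge offset 15 u8 set 0x78'    -> (12, 0x00000078, 0xffffff00)
    print(pedit_val_mask(15, 1, 0x78))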
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 4086a50a670e..b8268da5adaa 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -739,5 +739,30 @@
"teardown": [
"$TC actions flush action police"
]
+ },
+ {
+ "id": "689e",
+ "name": "Replace police action with invalid goto chain control",
+ "category": [
+ "actions",
+ "police"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action police",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action police rate 3mbit burst 250k drop index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action police index 90",
+ "matchPattern": "action order [0-9]*: police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action police"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 3aca33c00039..ddabb160a11b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -144,6 +144,30 @@
]
},
{
+ "id": "7571",
+ "name": "Add sample action with invalid rate",
+ "category": [
+ "actions",
+ "sample"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action sample",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action sample index 2",
+ "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
+ "matchCount": "0",
+ "teardown": [
+ "$TC actions flush action sample"
+ ]
+ },
+ {
"id": "b6d4",
"name": "Add sample action with mandatory arguments and invalid control action",
"category": [
@@ -584,5 +608,30 @@
"teardown": [
"$TC actions flush action sample"
]
+ },
+ {
+ "id": "0a6e",
+ "name": "Replace sample action with invalid goto chain control",
+ "category": [
+ "actions",
+ "sample"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action sample",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action sample rate 1024 group 4 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action sample",
+ "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action sample"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
index e89a7aa4012d..8e8c1ae12260 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -126,5 +126,30 @@
"teardown": [
""
]
+ },
+ {
+ "id": "b776",
+ "name": "Replace simple action with invalid goto chain control",
+ "category": [
+ "actions",
+ "simple"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action simple",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action simple sdata \"hello\" pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action simple",
+ "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action simple"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index 5aaf593b914a..ecd96eda7f6a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -484,5 +484,30 @@
"teardown": [
"$TC actions flush action skbedit"
]
+ },
+ {
+ "id": "1b2b",
+ "name": "Replace skbedit action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "skbedit"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbedit",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbedit ptype host pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions list action skbedit",
+ "matchPattern": "action order [0-9]*: skbedit ptype host pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
index fe3326e939c1..6eb4c4f97060 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -392,5 +392,30 @@
"teardown": [
"$TC actions flush action skbmod"
]
+ },
+ {
+ "id": "b651",
+ "name": "Replace skbmod action with invalid goto_chain control",
+ "category": [
+ "actions",
+ "skbmod"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action skbmod",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action skbmod set etype 0x1111 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions ls action skbmod",
+ "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action skbmod"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index e7e15a7336b6..28453a445fdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -884,5 +884,30 @@
"teardown": [
"$TC actions flush action tunnel_key"
]
+ },
+ {
+ "id": "8242",
+ "name": "Replace tunnel_key set action with invalid goto chain",
+ "category": [
+ "actions",
+ "tunnel_key"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action tunnel_key",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action tunnel_key index 90",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action tunnel_key"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 69ea09eefffc..cc7c7d758008 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -688,5 +688,30 @@
"teardown": [
"$TC actions flush action vlan"
]
+ },
+ {
+ "id": "e394",
+ "name": "Replace vlan push action with invalid goto chain control",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action vlan push id 500 pass index 90"
+ ],
+ "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
+ "expExitCode": "255",
+ "verifyCmd": "$TC actions get action vlan index 90",
+ "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
+ "matchCount": "1",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
new file mode 100644
index 000000000000..9002714b1851
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/concurrency.json
@@ -0,0 +1,177 @@
+[
+ {
+ "id": "e41d",
+ "name": "Add 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 add"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/add* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6f52",
+ "name": "Delete 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "c9da",
+ "name": "Replace 1M flower filters with 10 parallel tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 1000000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1000000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "14be",
+ "name": "Concurrently replace same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 replace"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/replace* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "100000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "0c44",
+ "name": "Concurrently delete same range of 100k flower filters from 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py $DEV2 $BATCH_DIR 100000 1 add",
+ "$TC -b $BATCH_DIR/add_0",
+ "./tdc_multibatch.py -d $DEV2 $BATCH_DIR 100000 10 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/del* -print | xargs -n 1 -P 10 $TC -f -b",
+ "expExitCode": "123",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "0",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "ab62",
+ "name": "Add and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 5 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 add",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ },
+ {
+ "id": "6e8f",
+ "name": "Replace and delete from same tp with 10 tc instances",
+ "category": [
+ "filter",
+ "flower",
+ "concurrency"
+ ],
+ "setup": [
+ "/bin/mkdir $BATCH_DIR",
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_multibatch.py -x init_ $DEV2 $BATCH_DIR 100000 10 add",
+ "find $BATCH_DIR/init_* -print | xargs -n 1 -P 5 $TC -b",
+ "./tdc_multibatch.py -x par_ -a 500001 -m 5 $DEV2 $BATCH_DIR 100000 5 replace",
+ "./tdc_multibatch.py -x par_ $DEV2 $BATCH_DIR 100000 5 del"
+ ],
+ "cmdUnderTest": "find $BATCH_DIR/par_* -print | xargs -n 1 -P 10 $TC -b",
+ "expExitCode": "0",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "500000",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm -rf $BATCH_DIR"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index 99a5ffca1088..e2f92cefb8d5 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -19,6 +19,26 @@
]
},
{
+ "id": "2638",
+ "name": "Add matchall and try to get it",
+ "category": [
+ "filter",
+ "matchall"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV1 clsact",
+ "$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
+ ],
+ "cmdUnderTest": "$TC filter get dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV1 ingress",
+ "matchPattern": "filter protocol all pref 1 matchall chain 0 handle 0x1234",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 clsact"
+ ]
+ },
+ {
"id": "d052",
"name": "Add 1M filters with the same action",
"category": [
@@ -38,5 +58,25 @@
"$TC qdisc del dev $DEV2 ingress",
"/bin/rm $BATCH_FILE"
]
+ },
+ {
+ "id": "4cbd",
+ "name": "Try to add filter with duplicate key",
+ "category": [
+ "filter",
+ "flower"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress",
+ "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop",
+ "expExitCode": "2",
+ "verifyCmd": "$TC -s filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower chain 0 handle",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress"
+ ]
}
]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index e6e4ce80a726..5cee15659e5f 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -61,10 +61,10 @@ class PluginMgr:
for pgn_inst in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
- def call_pre_case(self, test_ordinal, testid, test_name):
+ def call_pre_case(self, testid, test_name, *, test_skip=False):
for pgn_inst in self.plugin_instances:
try:
- pgn_inst.pre_case(test_ordinal, testid, test_name)
+ pgn_inst.pre_case(testid, test_name, test_skip)
except Exception as ee:
print('exception {} in call to pre_case for {} plugin'.
format(ee, pgn_inst.__class__))
@@ -192,10 +192,19 @@ def run_one_test(pm, args, index, tidx):
print("\t====================\n=====> ", end="")
print("Test " + tidx["id"] + ": " + tidx["name"])
+ if 'skip' in tidx:
+ if tidx['skip'] == 'yes':
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Test case designated as skipped.')
+ pm.call_pre_case(tidx['id'], tidx['name'], test_skip=True)
+ pm.call_post_execute()
+ return res
+
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
- pm.call_pre_case(index, tidx['id'], tidx['name'])
+ pm.call_pre_case(tidx['id'], tidx['name'])
prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
index 52fa539dc662..6a2bd2cf528e 100755
--- a/tools/testing/selftests/tc-testing/tdc_batch.py
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -13,6 +13,12 @@ parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
help="how many lines in batch file")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
parser.add_argument("-o", "--skip_sw",
help="skip_sw (offload), by default skip_hw",
action="store_true")
@@ -22,6 +28,21 @@ parser.add_argument("-s", "--share_action",
parser.add_argument("-p", "--prio",
help="all filters have different prio",
action="store_true")
+parser.add_argument(
+ "-e",
+ "--operation",
+ choices=['add', 'del', 'replace'],
+ default='add',
+ help="operation to perform on filters"
+ "(default: add filter)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="third byte of source MAC address of flower filter"
+ "(default: 0)")
args = parser.parse_args()
device = args.device
@@ -31,6 +52,8 @@ number = 1
if args.number:
number = args.number
+handle_start = args.handle_start
+
skip = "skip_hw"
if args.skip_sw:
skip = "skip_sw"
@@ -45,16 +68,43 @@ if args.prio:
if number > 0x4000:
number = 0x4000
+mac_prefix = args.mac_prefix
+
+def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter add dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter replace dev {} {} protocol ip parent ffff: handle {} "
+ " flower {} src_mac {} dst_mac {} action drop {}".format(
+ device, prio, handle, skip, src_mac, dst_mac, share_action))
+
+
+def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
+ share_action):
+ return ("filter del dev {} {} protocol ip parent ffff: handle {} "
+ "flower".format(device, prio, handle))
+
+
+formatter = format_add_filter
+if args.operation == "del":
+ formatter = format_del_filter
+elif args.operation == "replace":
+ formatter = format_rep_filter
+
index = 0
for i in range(0x100):
for j in range(0x100):
for k in range(0x100):
mac = ("{:02x}:{:02x}:{:02x}".format(i, j, k))
- src_mac = "e4:11:00:" + mac
+ src_mac = "e4:11:{:02x}:{}".format(mac_prefix, mac)
dst_mac = "e4:12:00:" + mac
- cmd = ("filter add dev {} {} protocol ip parent ffff: flower {} "
- "src_mac {} dst_mac {} action drop {}".format
- (device, prio, skip, src_mac, dst_mac, share_action))
+ cmd = formatter(device, prio, handle_start + index, skip, src_mac,
+ dst_mac, share_action)
file.write("{}\n".format(cmd))
index += 1
if index >= number:
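With the new options every generated line carries an explicit handle, so batches produced for add, replace and del address the same filters. A standalone sketch of one generated 'add' line, reusing the format string from the hunk above (device, prio and MAC values are arbitrary examples):

    def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
                          share_action):
        return ("filter add dev {} {} protocol ip parent ffff: handle {} "
                " flower {} src_mac {} dst_mac {} action drop {}".format(
                    device, prio, handle, skip, src_mac, dst_mac, share_action))

    # prints (as one line, wrapped here for readability):
    #   filter add dev eth1 prio 1 protocol ip parent ffff: handle 1
    #   flower skip_hw src_mac e4:11:00:00:00:00 dst_mac e4:12:00:00:00:00
    #   action drop
    print(format_add_filter("eth1", "prio 1", 1, "skip_hw",
                            "e4:11:00:00:00:00", "e4:12:00:00:00:00", ""))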
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 6d91e48c2625..942c70c041be 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -15,6 +15,7 @@ NAMES = {
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
+ 'BATCH_DIR': 'tmp',
# Length of time in seconds to wait before terminating a command
'TIMEOUT': 12,
# Name of the namespace to use
diff --git a/tools/testing/selftests/tc-testing/tdc_multibatch.py b/tools/testing/selftests/tc-testing/tdc_multibatch.py
new file mode 100755
index 000000000000..5e7237952e49
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_multibatch.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+"""
+tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
+files
+
+Copyright (C) 2019 Vlad Buslov <vladbu@mellanox.com>
+"""
+
+import argparse
+import os
+
+parser = argparse.ArgumentParser(
+ description='TC multiple batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("dir", help="where to put batch files")
+parser.add_argument(
+ "num_filters", type=int, help="how many lines per batch file")
+parser.add_argument("num_files", type=int, help="how many batch files")
+parser.add_argument(
+ "operation",
+ choices=['add', 'del', 'replace'],
+ help="operation to perform on filters")
+parser.add_argument(
+ "-x",
+ "--file_prefix",
+ default="",
+ help="prefix for generated batch file names")
+parser.add_argument(
+ "-d",
+ "--duplicate_handles",
+ action="store_true",
+ help="duplicate filter handle range in all files")
+parser.add_argument(
+ "-a",
+ "--handle_start",
+ type=int,
+ default=1,
+ help="start handle range from (default: 1)")
+parser.add_argument(
+ "-m",
+ "--mac_prefix",
+ type=int,
+ default=0,
+ choices=range(0, 256),
+ help="add this value to third byte of source MAC address of flower filter"
+ "(default: 0)")
+args = parser.parse_args()
+
+device = args.device
+dir = args.dir
+file_prefix = args.file_prefix + args.operation + "_"
+num_filters = args.num_filters
+num_files = args.num_files
+operation = args.operation
+duplicate_handles = args.duplicate_handles
+handle = args.handle_start
+mac_prefix = args.mac_prefix
+
+for i in range(num_files):
+ file = dir + '/' + file_prefix + str(i)
+ os.system("./tdc_batch.py -n {} -a {} -e {} -m {} {} {}".format(
+ num_filters, handle, operation, i + mac_prefix, device, file))
+ if not duplicate_handles:
+ handle += num_filters
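Each concurrency test above drives this wrapper from its setup commands; for instance, './tdc_multibatch.py $DEV2 $BATCH_DIR 100000 10 add' fans out into ten tdc_batch.py runs with disjoint handle ranges and distinct source-MAC bytes. A sketch of the commands the loop above issues under those arguments (device and directory names are assumed examples):

    # Equivalent of: ./tdc_multibatch.py eth1 tmp 100000 10 add
    # (handle_start 1, mac_prefix 0, no --duplicate_handles)
    for i in range(10):
        handle = 1 + i * 100000
        print("./tdc_batch.py -n 100000 -a {} -e add -m {} eth1 tmp/add_{}".format(
            handle, i, i))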
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
index 0caca3a06bd2..54d8d87f36b3 100644
--- a/tools/testing/selftests/timers/adjtick.c
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -136,6 +136,7 @@ int check_tick_adj(long tickval)
eppm = get_ppm_drift();
printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
+ fflush(stdout);
tx1.modes = 0;
adjtimex(&tx1);
diff --git a/tools/testing/selftests/timers/freq-step.c b/tools/testing/selftests/timers/freq-step.c
index 14a2b77fd012..8cd10662ffba 100644
--- a/tools/testing/selftests/timers/freq-step.c
+++ b/tools/testing/selftests/timers/freq-step.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* This test checks the response of the system clock to frequency
* steps made with adjtimex(). The frequency error and stability of
@@ -6,15 +7,6 @@
* values from the second interval exceed specified limits.
*
* Copyright (C) Miroslav Lichvar <mlichvar@redhat.com> 2017
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <math.h>
diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
index 830c462f605d..dc80728ed191 100644
--- a/tools/testing/selftests/timers/leapcrash.c
+++ b/tools/testing/selftests/timers/leapcrash.c
@@ -101,6 +101,7 @@ int main(void)
}
clear_time_state();
printf(".");
+ fflush(stdout);
}
printf("[OK]\n");
return ksft_exit_pass();
diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
index 1867db5d6f5e..7916cf5cc6ff 100644
--- a/tools/testing/selftests/timers/mqueue-lat.c
+++ b/tools/testing/selftests/timers/mqueue-lat.c
@@ -102,6 +102,7 @@ int main(int argc, char **argv)
int ret;
printf("Mqueue latency : ");
+ fflush(stdout);
ret = mqueue_lat_test();
if (ret < 0) {
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index 8adb0bb51d4d..71b5441c2fd9 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -142,6 +142,7 @@ int main(int argc, char **argv)
continue;
printf("Nanosleep %-31s ", clockstring(clockid));
+ fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
index c3c3dc10db17..eb3e79ed7b4a 100644
--- a/tools/testing/selftests/timers/nsleep-lat.c
+++ b/tools/testing/selftests/timers/nsleep-lat.c
@@ -155,6 +155,7 @@ int main(int argc, char **argv)
continue;
printf("nsleep latency %-26s ", clockstring(clockid));
+ fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 15cf56d32155..0ba500056e63 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
*
- * Licensed under the terms of the GNU GPL License version 2
- *
* Selftests for a few posix timers interface.
*
* Kernel loop code stolen from Steven Rostedt <srostedt@redhat.com>
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index dcf73c5dab6e..b41d8dd0c40c 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -112,6 +112,7 @@ int main(int argv, char **argc)
printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
printf("Estimating clock drift: ");
+ fflush(stdout);
sleep(120);
get_monotonic_and_raw(&mon, &raw);
diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
index 70fed27d8fd3..8c4179ee2ca2 100644
--- a/tools/testing/selftests/timers/set-tai.c
+++ b/tools/testing/selftests/timers/set-tai.c
@@ -55,6 +55,7 @@ int main(int argc, char **argv)
printf("tai offset started at %i\n", ret);
printf("Checking tai offsets can be properly set: ");
+ fflush(stdout);
for (i = 1; i <= 60; i++) {
ret = set_tai(i);
ret = get_tai();
diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
index 877fd5532fee..62bd33eb16f0 100644
--- a/tools/testing/selftests/timers/set-tz.c
+++ b/tools/testing/selftests/timers/set-tz.c
@@ -65,6 +65,7 @@ int main(int argc, char **argv)
printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
printf("Checking tz_minuteswest can be properly set: ");
+ fflush(stdout);
for (i = -15*60; i < 15*60; i += 30) {
ret = set_tz(i, dst);
ret = get_tz_min();
@@ -76,6 +77,7 @@ int main(int argc, char **argv)
printf("[OK]\n");
printf("Checking invalid tz_minuteswest values are caught: ");
+ fflush(stdout);
if (!set_tz(-15*60-1, dst)) {
printf("[FAILED] %i didn't return failure!\n", -15*60-1);
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
index 022b711c78ee..8066be9aff11 100644
--- a/tools/testing/selftests/timers/skew_consistency.c
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -32,7 +32,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include "../kselftest.h"
diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
index 759c9c06f1a0..cf3e48919874 100644
--- a/tools/testing/selftests/timers/threadtest.c
+++ b/tools/testing/selftests/timers/threadtest.c
@@ -163,6 +163,7 @@ int main(int argc, char **argv)
strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
printf("%s\n", buf);
printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
+ fflush(stdout);
/* spawn */
for (i = 0; i < thread_count; i++)
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index d9d3ab93b31a..5397de708d3c 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -123,6 +123,7 @@ int validate_freq(void)
/* Set the leap second insert flag */
printf("Testing ADJ_FREQ... ");
+ fflush(stdout);
for (i = 0; i < NUM_FREQ_VALID; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = valid_freq[i];
@@ -250,6 +251,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
int validate_set_offset(void)
{
printf("Testing ADJ_SETOFFSET... ");
+ fflush(stdout);
/* Test valid values */
if (set_offset(NSEC_PER_SEC - 1, 1))
diff --git a/tools/testing/selftests/tmpfs/.gitignore b/tools/testing/selftests/tmpfs/.gitignore
new file mode 100644
index 000000000000..a96838fad74d
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/.gitignore
@@ -0,0 +1 @@
+/bug-link-o-tmpfile
diff --git a/tools/testing/selftests/tmpfs/Makefile b/tools/testing/selftests/tmpfs/Makefile
new file mode 100644
index 000000000000..aa11ccc92e5b
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -Wall -O2
+CFLAGS += -D_GNU_SOURCE
+
+TEST_GEN_PROGS :=
+TEST_GEN_PROGS += bug-link-o-tmpfile
+
+include ../lib.mk
diff --git a/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
new file mode 100644
index 000000000000..b5c3ddb90942
--- /dev/null
+++ b/tools/testing/selftests/tmpfs/bug-link-o-tmpfile.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Test that open(O_TMPFILE), linkat() doesn't screw accounting. */
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+int main(void)
+{
+ int fd;
+
+ if (unshare(CLONE_NEWNS) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 4;
+ }
+ fprintf(stderr, "error: unshare, errno %d\n", errno);
+ return 1;
+ }
+ if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) {
+ fprintf(stderr, "error: mount '/', errno %d\n", errno);
+ return 1;
+ }
+
+ /* Our heroes: 1 root inode, 1 O_TMPFILE inode, 1 permanent inode. */
+ if (mount(NULL, "/tmp", "tmpfs", 0, "nr_inodes=3") == -1) {
+ fprintf(stderr, "error: mount tmpfs, errno %d\n", errno);
+ return 1;
+ }
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 1, errno %d\n", errno);
+ return 1;
+ }
+ if (linkat(fd, "", AT_FDCWD, "/tmp/1", AT_EMPTY_PATH) == -1) {
+ fprintf(stderr, "error: linkat, errno %d\n", errno);
+ return 1;
+ }
+ close(fd);
+
+ fd = openat(AT_FDCWD, "/tmp", O_WRONLY|O_TMPFILE, 0600);
+ if (fd == -1) {
+ fprintf(stderr, "error: open 2, errno %d\n", errno);
+ return 1;
+ }
+
+ return 0;
+}
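
For readers unfamiliar with the idiom being tested: O_TMPFILE creates an unnamed inode on the given filesystem, and linkat() with AT_EMPTY_PATH can later give it a directory entry. Stripped of the mount-namespace and nr_inodes setup above, the core sequence looks roughly like the sketch below (illustrative path, error handling shortened; linking an O_TMPFILE descriptor this way normally needs privilege, which the selftest has anyway since it calls unshare()):

/* Illustrative only; not part of the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Unnamed file on /tmp: it consumes an inode but has no name yet. */
	int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);
	if (fd < 0) {
		perror("open O_TMPFILE");
		return 1;
	}

	/* Give the anonymous inode a name; it now persists like a normal file. */
	if (linkat(fd, "", AT_FDCWD, "/tmp/now-visible", AT_EMPTY_PATH) < 0)
		perror("linkat");

	close(fd);
	return 0;
}
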
diff --git a/tools/testing/selftests/tpm2/Makefile b/tools/testing/selftests/tpm2/Makefile
new file mode 100644
index 000000000000..9dd848427a7b
--- /dev/null
+++ b/tools/testing/selftests/tpm2/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+include ../lib.mk
+
+TEST_PROGS := test_smoke.sh test_space.sh
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
new file mode 100755
index 000000000000..80521d46220c
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SmokeTest
diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
new file mode 100755
index 000000000000..a6f5e346635e
--- /dev/null
+++ b/tools/testing/selftests/tpm2/test_space.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+python -m unittest -v tpm2_tests.SpaceTest
diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py
new file mode 100644
index 000000000000..828c18584624
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2.py
@@ -0,0 +1,697 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+import hashlib
+import os
+import socket
+import struct
+import sys
+import unittest
+from fcntl import ioctl
+
+
+TPM2_ST_NO_SESSIONS = 0x8001
+TPM2_ST_SESSIONS = 0x8002
+
+TPM2_CC_FIRST = 0x01FF
+
+TPM2_CC_CREATE_PRIMARY = 0x0131
+TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET = 0x0139
+TPM2_CC_CREATE = 0x0153
+TPM2_CC_LOAD = 0x0157
+TPM2_CC_UNSEAL = 0x015E
+TPM2_CC_FLUSH_CONTEXT = 0x0165
+TPM2_CC_START_AUTH_SESSION = 0x0176
+TPM2_CC_GET_CAPABILITY = 0x017A
+TPM2_CC_GET_RANDOM = 0x017B
+TPM2_CC_PCR_READ = 0x017E
+TPM2_CC_POLICY_PCR = 0x017F
+TPM2_CC_PCR_EXTEND = 0x0182
+TPM2_CC_POLICY_PASSWORD = 0x018C
+TPM2_CC_POLICY_GET_DIGEST = 0x0189
+
+TPM2_SE_POLICY = 0x01
+TPM2_SE_TRIAL = 0x03
+
+TPM2_ALG_RSA = 0x0001
+TPM2_ALG_SHA1 = 0x0004
+TPM2_ALG_AES = 0x0006
+TPM2_ALG_KEYEDHASH = 0x0008
+TPM2_ALG_SHA256 = 0x000B
+TPM2_ALG_NULL = 0x0010
+TPM2_ALG_CBC = 0x0042
+TPM2_ALG_CFB = 0x0043
+
+TPM2_RH_OWNER = 0x40000001
+TPM2_RH_NULL = 0x40000007
+TPM2_RH_LOCKOUT = 0x4000000A
+TPM2_RS_PW = 0x40000009
+
+TPM2_RC_SIZE = 0x01D5
+TPM2_RC_AUTH_FAIL = 0x098E
+TPM2_RC_POLICY_FAIL = 0x099D
+TPM2_RC_COMMAND_CODE = 0x0143
+
+TSS2_RC_LAYER_SHIFT = 16
+TSS2_RESMGR_TPM_RC_LAYER = (11 << TSS2_RC_LAYER_SHIFT)
+
+TPM2_CAP_HANDLES = 0x00000001
+TPM2_CAP_COMMANDS = 0x00000002
+TPM2_CAP_TPM_PROPERTIES = 0x00000006
+
+TPM2_PT_FIXED = 0x100
+TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41
+
+HR_SHIFT = 24
+HR_LOADED_SESSION = 0x02000000
+HR_TRANSIENT = 0x80000000
+
+SHA1_DIGEST_SIZE = 20
+SHA256_DIGEST_SIZE = 32
+
+TPM2_VER0_ERRORS = {
+ 0x000: "TPM_RC_SUCCESS",
+ 0x030: "TPM_RC_BAD_TAG",
+}
+
+TPM2_VER1_ERRORS = {
+ 0x000: "TPM_RC_FAILURE",
+ 0x001: "TPM_RC_FAILURE",
+ 0x003: "TPM_RC_SEQUENCE",
+ 0x00B: "TPM_RC_PRIVATE",
+ 0x019: "TPM_RC_HMAC",
+ 0x020: "TPM_RC_DISABLED",
+ 0x021: "TPM_RC_EXCLUSIVE",
+ 0x024: "TPM_RC_AUTH_TYPE",
+ 0x025: "TPM_RC_AUTH_MISSING",
+ 0x026: "TPM_RC_POLICY",
+ 0x027: "TPM_RC_PCR",
+ 0x028: "TPM_RC_PCR_CHANGED",
+ 0x02D: "TPM_RC_UPGRADE",
+ 0x02E: "TPM_RC_TOO_MANY_CONTEXTS",
+ 0x02F: "TPM_RC_AUTH_UNAVAILABLE",
+ 0x030: "TPM_RC_REBOOT",
+ 0x031: "TPM_RC_UNBALANCED",
+ 0x042: "TPM_RC_COMMAND_SIZE",
+ 0x043: "TPM_RC_COMMAND_CODE",
+ 0x044: "TPM_RC_AUTHSIZE",
+ 0x045: "TPM_RC_AUTH_CONTEXT",
+ 0x046: "TPM_RC_NV_RANGE",
+ 0x047: "TPM_RC_NV_SIZE",
+ 0x048: "TPM_RC_NV_LOCKED",
+ 0x049: "TPM_RC_NV_AUTHORIZATION",
+ 0x04A: "TPM_RC_NV_UNINITIALIZED",
+ 0x04B: "TPM_RC_NV_SPACE",
+ 0x04C: "TPM_RC_NV_DEFINED",
+ 0x050: "TPM_RC_BAD_CONTEXT",
+ 0x051: "TPM_RC_CPHASH",
+ 0x052: "TPM_RC_PARENT",
+ 0x053: "TPM_RC_NEEDS_TEST",
+ 0x054: "TPM_RC_NO_RESULT",
+ 0x055: "TPM_RC_SENSITIVE",
+ 0x07F: "RC_MAX_FM0",
+}
+
+TPM2_FMT1_ERRORS = {
+ 0x001: "TPM_RC_ASYMMETRIC",
+ 0x002: "TPM_RC_ATTRIBUTES",
+ 0x003: "TPM_RC_HASH",
+ 0x004: "TPM_RC_VALUE",
+ 0x005: "TPM_RC_HIERARCHY",
+ 0x007: "TPM_RC_KEY_SIZE",
+ 0x008: "TPM_RC_MGF",
+ 0x009: "TPM_RC_MODE",
+ 0x00A: "TPM_RC_TYPE",
+ 0x00B: "TPM_RC_HANDLE",
+ 0x00C: "TPM_RC_KDF",
+ 0x00D: "TPM_RC_RANGE",
+ 0x00E: "TPM_RC_AUTH_FAIL",
+ 0x00F: "TPM_RC_NONCE",
+ 0x010: "TPM_RC_PP",
+ 0x012: "TPM_RC_SCHEME",
+ 0x015: "TPM_RC_SIZE",
+ 0x016: "TPM_RC_SYMMETRIC",
+ 0x017: "TPM_RC_TAG",
+ 0x018: "TPM_RC_SELECTOR",
+ 0x01A: "TPM_RC_INSUFFICIENT",
+ 0x01B: "TPM_RC_SIGNATURE",
+ 0x01C: "TPM_RC_KEY",
+ 0x01D: "TPM_RC_POLICY_FAIL",
+ 0x01F: "TPM_RC_INTEGRITY",
+ 0x020: "TPM_RC_TICKET",
+ 0x021: "TPM_RC_RESERVED_BITS",
+ 0x022: "TPM_RC_BAD_AUTH",
+ 0x023: "TPM_RC_EXPIRED",
+ 0x024: "TPM_RC_POLICY_CC",
+ 0x025: "TPM_RC_BINDING",
+ 0x026: "TPM_RC_CURVE",
+ 0x027: "TPM_RC_ECC_POINT",
+}
+
+TPM2_WARN_ERRORS = {
+ 0x001: "TPM_RC_CONTEXT_GAP",
+ 0x002: "TPM_RC_OBJECT_MEMORY",
+ 0x003: "TPM_RC_SESSION_MEMORY",
+ 0x004: "TPM_RC_MEMORY",
+ 0x005: "TPM_RC_SESSION_HANDLES",
+ 0x006: "TPM_RC_OBJECT_HANDLES",
+ 0x007: "TPM_RC_LOCALITY",
+ 0x008: "TPM_RC_YIELDED",
+ 0x009: "TPM_RC_CANCELED",
+ 0x00A: "TPM_RC_TESTING",
+ 0x010: "TPM_RC_REFERENCE_H0",
+ 0x011: "TPM_RC_REFERENCE_H1",
+ 0x012: "TPM_RC_REFERENCE_H2",
+ 0x013: "TPM_RC_REFERENCE_H3",
+ 0x014: "TPM_RC_REFERENCE_H4",
+ 0x015: "TPM_RC_REFERENCE_H5",
+ 0x016: "TPM_RC_REFERENCE_H6",
+ 0x018: "TPM_RC_REFERENCE_S0",
+ 0x019: "TPM_RC_REFERENCE_S1",
+ 0x01A: "TPM_RC_REFERENCE_S2",
+ 0x01B: "TPM_RC_REFERENCE_S3",
+ 0x01C: "TPM_RC_REFERENCE_S4",
+ 0x01D: "TPM_RC_REFERENCE_S5",
+ 0x01E: "TPM_RC_REFERENCE_S6",
+ 0x020: "TPM_RC_NV_RATE",
+ 0x021: "TPM_RC_LOCKOUT",
+ 0x022: "TPM_RC_RETRY",
+ 0x023: "TPM_RC_NV_UNAVAILABLE",
+ 0x7F: "TPM_RC_NOT_USED",
+}
+
+RC_VER1 = 0x100
+RC_FMT1 = 0x080
+RC_WARN = 0x900
+
+ALG_DIGEST_SIZE_MAP = {
+ TPM2_ALG_SHA1: SHA1_DIGEST_SIZE,
+ TPM2_ALG_SHA256: SHA256_DIGEST_SIZE,
+}
+
+ALG_HASH_FUNCTION_MAP = {
+ TPM2_ALG_SHA1: hashlib.sha1,
+ TPM2_ALG_SHA256: hashlib.sha256
+}
+
+NAME_ALG_MAP = {
+ "sha1": TPM2_ALG_SHA1,
+ "sha256": TPM2_ALG_SHA256,
+}
+
+
+class UnknownAlgorithmIdError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+ return '0x%0x' % (self.alg)
+
+
+class UnknownAlgorithmNameError(Exception):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return self.name
+
+
+class UnknownPCRBankError(Exception):
+ def __init__(self, alg):
+ self.alg = alg
+
+ def __str__(self):
+ return '0x%0x' % (self.alg)
+
+
+class ProtocolError(Exception):
+ def __init__(self, cc, rc):
+ self.cc = cc
+ self.rc = rc
+
+ if (rc & RC_FMT1) == RC_FMT1:
+ self.name = TPM2_FMT1_ERRORS.get(rc & 0x3f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_WARN) == RC_WARN:
+ self.name = TPM2_WARN_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ elif (rc & RC_VER1) == RC_VER1:
+ self.name = TPM2_VER1_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+ else:
+ self.name = TPM2_VER0_ERRORS.get(rc & 0x7f, "TPM_RC_UNKNOWN")
+
+ def __str__(self):
+ if self.cc:
+ return '%s: cc=0x%08x, rc=0x%08x' % (self.name, self.cc, self.rc)
+ else:
+ return '%s: rc=0x%08x' % (self.name, self.rc)
+
+
+class AuthCommand(object):
+ """TPMS_AUTH_COMMAND"""
+
+ def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
+ hmac=''):
+ self.session_handle = session_handle
+ self.nonce = nonce
+ self.session_attributes = session_attributes
+ self.hmac = hmac
+
+ def __str__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.pack(fmt, self.session_handle, len(self.nonce),
+ self.nonce, self.session_attributes, len(self.hmac),
+ self.hmac)
+
+ def __len__(self):
+ fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
+ return struct.calcsize(fmt)
+
+
+class SensitiveCreate(object):
+ """TPMS_SENSITIVE_CREATE"""
+
+ def __init__(self, user_auth='', data=''):
+ self.user_auth = user_auth
+ self.data = data
+
+ def __str__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.pack(fmt, len(self.user_auth), self.user_auth,
+ len(self.data), self.data)
+
+ def __len__(self):
+ fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
+ return struct.calcsize(fmt)
+
+
+class Public(object):
+ """TPMT_PUBLIC"""
+
+ FIXED_TPM = (1 << 1)
+ FIXED_PARENT = (1 << 4)
+ SENSITIVE_DATA_ORIGIN = (1 << 5)
+ USER_WITH_AUTH = (1 << 6)
+ RESTRICTED = (1 << 16)
+ DECRYPT = (1 << 17)
+
+ def __fmt(self):
+ return '>HHIH%us%usH%us' % \
+ (len(self.auth_policy), len(self.parameters), len(self.unique))
+
+ def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
+ parameters='', unique=''):
+ self.object_type = object_type
+ self.name_alg = name_alg
+ self.object_attributes = object_attributes
+ self.auth_policy = auth_policy
+ self.parameters = parameters
+ self.unique = unique
+
+ def __str__(self):
+ return struct.pack(self.__fmt(),
+ self.object_type,
+ self.name_alg,
+ self.object_attributes,
+ len(self.auth_policy),
+ self.auth_policy,
+ self.parameters,
+ len(self.unique),
+ self.unique)
+
+ def __len__(self):
+ return struct.calcsize(self.__fmt())
+
+
+def get_digest_size(alg):
+ ds = ALG_DIGEST_SIZE_MAP.get(alg)
+ if not ds:
+ raise UnknownAlgorithmIdError(alg)
+ return ds
+
+
+def get_hash_function(alg):
+ f = ALG_HASH_FUNCTION_MAP.get(alg)
+ if not f:
+ raise UnknownAlgorithmIdError(alg)
+ return f
+
+
+def get_algorithm(name):
+ alg = NAME_ALG_MAP.get(name)
+ if not alg:
+ raise UnknownAlgorithmNameError(name)
+ return alg
+
+
+def hex_dump(d):
+ d = [format(ord(x), '02x') for x in d]
+ d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+ d = [' '.join(x) for x in d]
+ d = os.linesep.join(d)
+
+ return d
+
+class Client:
+ FLAG_DEBUG = 0x01
+ FLAG_SPACE = 0x02
+ TPM_IOC_NEW_SPACE = 0xa200
+
+ def __init__(self, flags = 0):
+ self.flags = flags
+
+ if (self.flags & Client.FLAG_SPACE) == 0:
+ self.tpm = open('/dev/tpm0', 'r+b', buffering=0)
+ else:
+ self.tpm = open('/dev/tpmrm0', 'r+b', buffering=0)
+
+ def close(self):
+ self.tpm.close()
+
+ def send_cmd(self, cmd):
+ self.tpm.write(cmd)
+ rsp = self.tpm.read()
+
+ if (self.flags & Client.FLAG_DEBUG) != 0:
+ sys.stderr.write('cmd' + os.linesep)
+ sys.stderr.write(hex_dump(cmd) + os.linesep)
+ sys.stderr.write('rsp' + os.linesep)
+ sys.stderr.write(hex_dump(rsp) + os.linesep)
+
+ rc = struct.unpack('>I', rsp[6:10])[0]
+ if rc != 0:
+ cc = struct.unpack('>I', cmd[6:10])[0]
+ raise ProtocolError(cc, rc)
+
+ return rsp
+
+ def read_pcr(self, i, bank_alg = TPM2_ALG_SHA1):
+ pcrsel_len = max((i >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ pcrsel[i >> 3] = 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IHB%us' % (pcrsel_len)
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_READ,
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ rsp = self.send_cmd(cmd)
+
+ pcr_update_cnt, pcr_select_cnt = struct.unpack('>II', rsp[10:18])
+ assert pcr_select_cnt == 1
+ rsp = rsp[18:]
+
+ alg2, pcrsel_len2 = struct.unpack('>HB', rsp[:3])
+ assert bank_alg == alg2 and pcrsel_len == pcrsel_len2
+ rsp = rsp[3 + pcrsel_len:]
+
+ digest_cnt = struct.unpack('>I', rsp[:4])[0]
+ if digest_cnt == 0:
+ return None
+ rsp = rsp[6:]
+
+ return rsp
+
+ def extend_pcr(self, i, dig, bank_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(bank_alg)
+ assert(ds == len(dig))
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us IH%us' % (len(auth_cmd), ds)
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_PCR_EXTEND,
+ i,
+ len(auth_cmd),
+ str(auth_cmd),
+ 1, bank_alg, dig)
+
+ self.send_cmd(cmd)
+
+ def start_auth_session(self, session_type, name_alg = TPM2_ALG_SHA1):
+ fmt = '>HII IIH16sHBHH'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_START_AUTH_SESSION,
+ TPM2_RH_NULL,
+ TPM2_RH_NULL,
+ 16,
+ '\0' * 16,
+ 0,
+ session_type,
+ TPM2_ALG_NULL,
+ name_alg)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def __calc_pcr_digest(self, pcrs, bank_alg = TPM2_ALG_SHA1,
+ digest_alg = TPM2_ALG_SHA1):
+ x = []
+ f = get_hash_function(digest_alg)
+
+ for i in pcrs:
+ pcr = self.read_pcr(i, bank_alg)
+ if pcr == None:
+ return None
+ x += pcr
+
+ return f(bytearray(x)).digest()
+
+ def policy_pcr(self, handle, pcrs, bank_alg = TPM2_ALG_SHA1,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ dig = self.__calc_pcr_digest(pcrs, bank_alg, name_alg)
+ if not dig:
+ raise UnknownPCRBankError(bank_alg)
+
+ pcrsel_len = max((max(pcrs) >> 3) + 1, 3)
+ pcrsel = [0] * pcrsel_len
+ for i in pcrs:
+ pcrsel[i >> 3] |= 1 << (i & 7)
+ pcrsel = ''.join(map(chr, pcrsel))
+
+ fmt = '>HII IH%usIHB3s' % ds
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PCR,
+ handle,
+ len(dig), str(dig),
+ 1,
+ bank_alg,
+ pcrsel_len, pcrsel)
+
+ self.send_cmd(cmd)
+
+ def policy_password(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_PASSWORD,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def get_policy_digest(self, handle):
+ fmt = '>HII I'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_POLICY_GET_DIGEST,
+ handle)
+
+ return self.send_cmd(cmd)[12:]
+
+ def flush_context(self, handle):
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_FLUSH_CONTEXT,
+ handle)
+
+ self.send_cmd(cmd)
+
+ def create_root_key(self, auth_value = ''):
+ attributes = \
+ Public.FIXED_TPM | \
+ Public.FIXED_PARENT | \
+ Public.SENSITIVE_DATA_ORIGIN | \
+ Public.USER_WITH_AUTH | \
+ Public.RESTRICTED | \
+ Public.DECRYPT
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value)
+
+ public_parms = struct.pack(
+ '>HHHHHI',
+ TPM2_ALG_AES,
+ 128,
+ TPM2_ALG_CFB,
+ TPM2_ALG_NULL,
+ 2048,
+ 0)
+
+ public = Public(
+ object_type=TPM2_ALG_RSA,
+ name_alg=TPM2_ALG_SHA1,
+ object_attributes=attributes,
+ parameters=public_parms)
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE_PRIMARY,
+ TPM2_RH_OWNER,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ def seal(self, parent_key, data, auth_value, policy_dig,
+ name_alg = TPM2_ALG_SHA1):
+ ds = get_digest_size(name_alg)
+ assert(not policy_dig or ds == len(policy_dig))
+
+ attributes = 0
+ if not policy_dig:
+ attributes |= Public.USER_WITH_AUTH
+ policy_dig = ''
+
+ auth_cmd = AuthCommand()
+ sensitive = SensitiveCreate(user_auth=auth_value, data=data)
+
+ public = Public(
+ object_type=TPM2_ALG_KEYEDHASH,
+ name_alg=name_alg,
+ object_attributes=attributes,
+ auth_policy=policy_dig,
+ parameters=struct.pack('>H', TPM2_ALG_NULL))
+
+ fmt = '>HIII I%us H%us H%us HI' % \
+ (len(auth_cmd), len(sensitive), len(public))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_CREATE,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ len(sensitive),
+ str(sensitive),
+ len(public),
+ str(public),
+ 0, 0)
+
+ rsp = self.send_cmd(cmd)
+
+ return rsp[14:]
+
+ def unseal(self, parent_key, blob, auth_value, policy_handle):
+ private_len = struct.unpack('>H', blob[0:2])[0]
+ public_start = private_len + 2
+ public_len = struct.unpack('>H', blob[public_start:public_start + 2])[0]
+ blob = blob[:private_len + public_len + 4]
+
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us %us' % (len(auth_cmd), len(blob))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_LOAD,
+ parent_key,
+ len(auth_cmd),
+ str(auth_cmd),
+ blob)
+
+ data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
+
+ if policy_handle:
+ auth_cmd = AuthCommand(session_handle=policy_handle, hmac=auth_value)
+ else:
+ auth_cmd = AuthCommand(hmac=auth_value)
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_UNSEAL,
+ data_handle,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ try:
+ rsp = self.send_cmd(cmd)
+ finally:
+ self.flush_context(data_handle)
+
+ data_len = struct.unpack('>I', rsp[10:14])[0] - 2
+
+ return rsp[16:16 + data_len]
+
+ def reset_da_lock(self):
+ auth_cmd = AuthCommand()
+
+ fmt = '>HII I I%us' % (len(auth_cmd))
+ cmd = struct.pack(
+ fmt,
+ TPM2_ST_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
+ TPM2_RH_LOCKOUT,
+ len(auth_cmd),
+ str(auth_cmd))
+
+ self.send_cmd(cmd)
+
+ def __get_cap_cnt(self, cap, pt, cnt):
+ handles = []
+ fmt = '>HII III'
+
+ cmd = struct.pack(fmt,
+ TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ TPM2_CC_GET_CAPABILITY,
+ cap, pt, cnt)
+
+ rsp = self.send_cmd(cmd)[10:]
+ more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
+ rsp = rsp[9:]
+
+ for i in xrange(0, cnt):
+ handle = struct.unpack('>I', rsp[:4])[0]
+ handles.append(handle)
+ rsp = rsp[4:]
+
+ return handles, more_data
+
+ def get_cap(self, cap, pt):
+ handles = []
+
+ more_data = True
+ while more_data:
+ next_handles, more_data = self.__get_cap_cnt(cap, pt, 1)
+ handles += next_handles
+ pt += 1
+
+ return handles
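
send_cmd() above simply writes a marshalled command to the character device and pulls the 32-bit response code out of bytes 6..9 of the reply. For readers who want to see the raw framing outside of Python, here is a minimal C sketch, not part of the patch, that issues TPM2_GetRandom against /dev/tpm0; the tag and command-code constants come from the table above, while the buffer sizes and requested byte count are arbitrary choices for the example:

/* Illustrative only; not part of the patch. */
#include <endian.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TPM2_ST_NO_SESSIONS 0x8001
#define TPM2_CC_GET_RANDOM  0x017B

int main(void)
{
	/* Command = header (tag u16, size u32, cc u32) + u16 requested byte count,
	 * all big endian, matching the Python '>HIIH' pack used above. */
	uint8_t cmd[12], rsp[64];
	uint16_t tag = htobe16(TPM2_ST_NO_SESSIONS);
	uint32_t size = htobe32(sizeof(cmd));
	uint32_t cc = htobe32(TPM2_CC_GET_RANDOM);
	uint16_t count = htobe16(8);		/* ask for 8 random bytes */

	memcpy(cmd, &tag, 2);
	memcpy(cmd + 2, &size, 4);
	memcpy(cmd + 6, &cc, 4);
	memcpy(cmd + 10, &count, 2);

	int fd = open("/dev/tpm0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/tpm0");
		return 1;
	}
	if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
		perror("write");
		return 1;
	}
	ssize_t n = read(fd, rsp, sizeof(rsp));
	if (n < 10) {
		fprintf(stderr, "short or failed read\n");
		return 1;
	}

	/* The response code lives at bytes 6..9 of the reply, like rsp[6:10] above. */
	uint32_t rc;
	memcpy(&rc, rsp + 6, 4);
	printf("rc = 0x%08x, %zd response bytes\n", be32toh(rc), n);
	close(fd);
	return 0;
}
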
diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py
new file mode 100644
index 000000000000..d4973be53493
--- /dev/null
+++ b/tools/testing/selftests/tpm2/tpm2_tests.py
@@ -0,0 +1,290 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+
+from argparse import ArgumentParser
+from argparse import FileType
+import os
+import sys
+import tpm2
+from tpm2 import ProtocolError
+import unittest
+import logging
+import struct
+
+class SmokeTest(unittest.TestCase):
+ def setUp(self):
+ self.client = tpm2.Client()
+ self.root_key = self.client.create_root_key()
+
+ def tearDown(self):
+ self.client.flush_context(self.root_key)
+ self.client.close()
+
+ def test_seal_with_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 15
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ result = self.client.unseal(self.root_key, blob, auth, None)
+ self.assertEqual(data, result)
+
+ def test_seal_with_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 15
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ def test_unseal_with_wrong_auth(self):
+ data = 'X' * 64
+ auth = 'A' * 20
+ rc = 0
+
+ blob = self.client.seal(self.root_key, data, auth, None)
+ try:
+ result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
+
+ def test_unseal_with_wrong_policy(self):
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
+
+ data = 'X' * 64
+ auth = 'A' * 17
+ pcrs = [16]
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ policy_dig = self.client.get_policy_digest(handle)
+ finally:
+ self.client.flush_context(handle)
+
+ blob = self.client.seal(self.root_key, data, auth, policy_dig)
+
+ # Extend first a PCR that is not part of the policy and try to unseal.
+ # This should succeed.
+
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ self.client.extend_pcr(1, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(data, result)
+
+ # Then, extend a PCR that is part of the policy and try to unseal.
+ # This should fail.
+ self.client.extend_pcr(16, 'X' * ds)
+
+ handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
+
+ rc = 0
+
+ try:
+ self.client.policy_pcr(handle, pcrs)
+ self.client.policy_password(handle)
+
+ result = self.client.unseal(self.root_key, blob, auth, handle)
+ except ProtocolError, e:
+ rc = e.rc
+ self.client.flush_context(handle)
+ except:
+ self.client.flush_context(handle)
+ raise
+
+ self.assertEqual(rc, tpm2.TPM2_RC_POLICY_FAIL)
+
+ def test_seal_with_too_long_auth(self):
+ ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
+ data = 'X' * 64
+ auth = 'A' * (ds + 1)
+
+ rc = 0
+ try:
+ blob = self.client.seal(self.root_key, data, auth, None)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
+
+ def test_too_short_cmd(self):
+ rejected = False
+ try:
+ fmt = '>HIII'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt) + 1,
+ tpm2.TPM2_CC_FLUSH_CONTEXT,
+ 0xDEADBEEF)
+
+ self.client.send_cmd(cmd)
+ except IOError, e:
+ rejected = True
+ except:
+ pass
+ self.assertEqual(rejected, True)
+
+ def test_read_partial_resp(self):
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+ hdr = self.client.tpm.read(10)
+ sz = struct.unpack('>I', hdr[2:6])[0]
+ rsp = self.client.tpm.read()
+ except:
+ pass
+ self.assertEqual(sz, 10 + 2 + 32)
+ self.assertEqual(len(rsp), 2 + 32)
+
+ def test_read_partial_overwrite(self):
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+ # Read part of the response
+ rsp1 = self.client.tpm.read(15)
+
+ # Send a new cmd
+ self.client.tpm.write(cmd)
+
+ # Read the whole response
+ rsp2 = self.client.tpm.read()
+ except:
+ pass
+ self.assertEqual(len(rsp1), 15)
+ self.assertEqual(len(rsp2), 10 + 2 + 32)
+
+ def test_send_two_cmds(self):
+ rejected = False
+ try:
+ fmt = '>HIIH'
+ cmd = struct.pack(fmt,
+ tpm2.TPM2_ST_NO_SESSIONS,
+ struct.calcsize(fmt),
+ tpm2.TPM2_CC_GET_RANDOM,
+ 0x20)
+ self.client.tpm.write(cmd)
+
+ # expect the second write to raise an -EBUSY error
+ self.client.tpm.write(cmd)
+ rsp = self.client.tpm.read()
+
+ except IOError, e:
+ # read the response
+ rsp = self.client.tpm.read()
+ rejected = True
+ pass
+ except:
+ pass
+ self.assertEqual(rejected, True)
+
+class SpaceTest(unittest.TestCase):
+ def setUp(self):
+ logging.basicConfig(filename='SpaceTest.log', level=logging.DEBUG)
+
+ def test_make_two_spaces(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_make_two_spaces")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root2 = space2.create_root_key()
+ root3 = space2.create_root_key()
+
+ log.debug("%08x" % (root1))
+ log.debug("%08x" % (root2))
+ log.debug("%08x" % (root3))
+
+ def test_flush_context(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_flush_context")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ space1.flush_context(root1)
+
+ def test_get_handles(self):
+ log = logging.getLogger(__name__)
+ log.debug("test_get_handles")
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space1.create_root_key()
+ space2 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ space2.create_root_key()
+ space2.create_root_key()
+
+ handles = space2.get_cap(tpm2.TPM2_CAP_HANDLES, tpm2.HR_TRANSIENT)
+
+ self.assertEqual(len(handles), 2)
+
+ log.debug("%08x" % (handles[0]))
+ log.debug("%08x" % (handles[1]))
+
+ def test_invalid_cc(self):
+ log = logging.getLogger(__name__)
+ log.debug(sys._getframe().f_code.co_name)
+
+ TPM2_CC_INVALID = tpm2.TPM2_CC_FIRST - 1
+
+ space1 = tpm2.Client(tpm2.Client.FLAG_SPACE)
+ root1 = space1.create_root_key()
+ log.debug("%08x" % (root1))
+
+ fmt = '>HII'
+ cmd = struct.pack(fmt, tpm2.TPM2_ST_NO_SESSIONS, struct.calcsize(fmt),
+ TPM2_CC_INVALID)
+
+ rc = 0
+ try:
+ space1.send_cmd(cmd)
+ except ProtocolError, e:
+ rc = e.rc
+
+ self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
+ tpm2.TSS2_RESMGR_TPM_RC_LAYER)
diff --git a/tools/testing/selftests/user/Makefile b/tools/testing/selftests/user/Makefile
index d401b63c5b1a..640a40f9b72b 100644
--- a/tools/testing/selftests/user/Makefile
+++ b/tools/testing/selftests/user/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
# Makefile for user memory selftests
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 93b0ebf8cc38..5ac4b00acfbc 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_test.c: Sample code to test parse_vdso.c on x86
* Copyright (c) 2011-2014 Andy Lutomirski
- * Subject to the GNU General Public License, version 2
*
* You can amuse yourself by compiling with:
* gcc -std=gnu99 -nostdlib
diff --git a/tools/testing/selftests/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test.c
index eda53f833d8e..719d5a6bd664 100644
--- a/tools/testing/selftests/vDSO/vdso_test.c
+++ b/tools/testing/selftests/vDSO/vdso_test.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_test.c: Sample code to test parse_vdso.c
* Copyright (c) 2014 Andy Lutomirski
- * Subject to the GNU General Public License, version 2
*
* Compile with:
* gcc -std=gnu99 vdso_test.c parse_vdso.c
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e13eb6cc8901..9534dc2bc929 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests
-ifndef OUTPUT
- OUTPUT := $(shell pwd)
-endif
-
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
TEST_GEN_FILES = compaction_test
@@ -25,6 +21,8 @@ TEST_GEN_FILES += virtual_address_range
TEST_PROGS := run_vmtests
+TEST_FILES := test_vmalloc.sh
+
KSFT_KHDR_INSTALL := 1
include ../lib.mk
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index 9b777fa95f09..5a2d7b8efc40 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -23,6 +23,14 @@
#define MAP_HUGETLB 0x40000 /* arch specific */
#endif
+#ifndef MAP_HUGE_SHIFT
+#define MAP_HUGE_SHIFT 26
+#endif
+
+#ifndef MAP_HUGE_MASK
+#define MAP_HUGE_MASK 0x3f
+#endif
+
/* Only ia64 requires this */
#ifdef __ia64__
#define ADDR (void *)(0x8000000000000000UL)
@@ -58,12 +66,29 @@ static int read_bytes(char *addr)
return 0;
}
-int main(void)
+int main(int argc, char **argv)
{
void *addr;
int ret;
+ size_t length = LENGTH;
+ int flags = FLAGS;
+ int shift = 0;
+
+ if (argc > 1)
+ length = atol(argv[1]) << 20;
+ if (argc > 2) {
+ shift = atoi(argv[2]);
+ if (shift)
+ flags |= (shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
+ }
+
+ if (shift)
+ printf("%u kB hugepages\n", 1 << shift);
+ else
+ printf("Default size hugepages\n");
+ printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
- addr = mmap(ADDR, LENGTH, PROTECTION, FLAGS, -1, 0);
+ addr = mmap(ADDR, length, PROTECTION, flags, -1, 0);
if (addr == MAP_FAILED) {
perror("mmap");
exit(1);
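
The new flags computation encodes the requested huge page size as its base-2 logarithm in the upper mmap() flag bits, which is what MAP_HUGE_SHIFT and MAP_HUGE_MASK exist for. As a standalone illustration of the encoding (not taken from this test, and assuming 2 MB hugepages have been reserved on the system), the following sketch requests 2 MB pages explicitly with shift 21:

/* Illustrative only; not part of the patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

int main(void)
{
	size_t length = 4UL << 20;		/* 4 MiB, a multiple of the huge page size */
	int shift = 21;				/* log2(2 MiB) */
	int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    (shift << MAP_HUGE_SHIFT);	/* request 2 MB hugepages explicitly */

	void *addr = mmap(NULL, length, PROT_READ | PROT_WRITE, flags, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");			/* e.g. no hugepages of that size reserved */
		exit(1);
	}
	printf("mapped %zu bytes at %p using %u kB pages\n",
	       length, addr, 1u << (shift - 10));
	munmap(addr, length);
	return 0;
}
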
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 584a91ae4a8f..951c507a27f7 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -211,4 +211,20 @@ else
echo "[PASS]"
fi
+echo "------------------------------------"
+echo "running vmalloc stability smoke test"
+echo "------------------------------------"
+./test_vmalloc.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
exit $exitcode
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
new file mode 100755
index 000000000000..06d2bb109f06
--- /dev/null
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+#
+# This is a test script for the kernel test driver that analyses the
+# vmalloc allocator; it is essentially just a kernel module loader. You
+# can pass different parameters in order to:
+# a) analyse the performance of vmalloc allocations;
+# b) stress-test and check the stability of the vmalloc subsystem.
+
+TEST_NAME="vmalloc"
+DRIVER="test_${TEST_NAME}"
+
+# 1 if fails
+exitcode=1
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+#
+# Static templates for the performance, stress and smoke tests.
+# It is also possible to pass any supported parameters manually.
+#
+PERF_PARAM="single_cpu_test=1 sequential_test_order=1 test_repeat_count=3"
+SMOKE_PARAM="single_cpu_test=1 test_loop_count=10000 test_repeat_count=10"
+STRESS_PARAM="test_repeat_count=20"
+
+check_test_requirements()
+{
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "$0: Must be run as root"
+ exit $ksft_skip
+ fi
+
+ if ! which modprobe > /dev/null 2>&1; then
+ echo "$0: You need modprobe installed"
+ exit $ksft_skip
+ fi
+
+ if ! modinfo $DRIVER > /dev/null 2>&1; then
+ echo "$0: You must have the following enabled in your kernel:"
+ echo "CONFIG_TEST_VMALLOC=m"
+ exit $ksft_skip
+ fi
+}
+
+run_performance_check()
+{
+ echo "Run performance tests to evaluate how fast vmalloc allocation is."
+ echo "It runs all test cases on a single CPU in sequential order."
+
+ modprobe $DRIVER $PERF_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel message buffer to see the summary."
+}
+
+run_stability_check()
+{
+ echo "Run stability tests. In order to stress vmalloc subsystem we run"
+ echo "all available test cases on all available CPUs simultaneously."
+ echo "It will take time, so be patient."
+
+ modprobe $DRIVER $STRESS_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+run_smoke_check()
+{
+ echo "Run smoke test. Note, this test provides basic coverage."
+ echo "Please check $0 output how it can be used"
+ echo "for deep performance analysis as well as stress testing."
+
+ modprobe $DRIVER $SMOKE_PARAM > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+usage()
+{
+ echo -n "Usage: $0 [ performance ] | [ stress ] | | [ smoke ] | "
+ echo "manual parameters"
+ echo
+ echo "Valid tests and parameters:"
+ echo
+ modinfo $DRIVER
+ echo
+ echo "Example usage:"
+ echo
+ echo "# Shows help message"
+ echo "./${DRIVER}.sh"
+ echo
+ echo "# Runs 1 test(id_1), repeats it 5 times on all online CPUs"
+ echo "./${DRIVER}.sh run_test_mask=1 test_repeat_count=5"
+ echo
+ echo -n "# Runs 4 tests(id_1|id_2|id_4|id_16) on one CPU with "
+ echo "sequential order"
+ echo -n "./${DRIVER}.sh single_cpu_test=1 sequential_test_order=1 "
+ echo "run_test_mask=23"
+ echo
+ echo -n "# Runs all tests on all online CPUs, shuffled order, repeats "
+ echo "20 times"
+ echo "./${DRIVER}.sh test_repeat_count=20"
+ echo
+ echo "# Performance analysis"
+ echo "./${DRIVER}.sh performance"
+ echo
+ echo "# Stress testing"
+ echo "./${DRIVER}.sh stress"
+ echo
+ exit 0
+}
+
+function validate_passed_args()
+{
+ VALID_ARGS=`modinfo $DRIVER | awk '/parm:/ {print $2}' | sed 's/:.*//'`
+
+ #
+ # Something has been passed, check it.
+ #
+ for passed_arg in $@; do
+ key=${passed_arg//=*/}
+ val="${passed_arg:$((${#key}+1))}"
+ valid=0
+
+ for valid_arg in $VALID_ARGS; do
+ if [[ $key = $valid_arg ]] && [[ $val -gt 0 ]]; then
+ valid=1
+ break
+ fi
+ done
+
+ if [[ $valid -ne 1 ]]; then
+ echo "Error: key or value is not correct: ${key} $val"
+ exit $exitcode
+ fi
+ done
+}
+
+function run_manual_check()
+{
+ #
+ # Validate passed parameters. If a wrong one is found,
+ # the script exits and does not execute further.
+ #
+ validate_passed_args $@
+
+ echo "Run the test with following parameters: $@"
+ modprobe $DRIVER $@ > /dev/null 2>&1
+ echo "Done."
+ echo "Check the kernel ring buffer to see the summary."
+}
+
+function run_test()
+{
+ if [ $# -eq 0 ]; then
+ usage
+ else
+ if [[ "$1" = "performance" ]]; then
+ run_performance_check
+ elif [[ "$1" = "stress" ]]; then
+ run_stability_check
+ elif [[ "$1" = "smoke" ]]; then
+ run_smoke_check
+ else
+ run_manual_check $@
+ fi
+ fi
+}
+
+check_test_requirements
+run_test $@
+
+exit 0
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 5d1db824f73a..d3362777a425 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Stress userfaultfd syscall.
*
* Copyright (C) 2015 Red Hat, Inc.
*
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
* This test allocates two virtual areas and bounces the physical
* memory across the two virtual areas (from area_src to area_dst)
* using userfaultfd.
@@ -123,7 +121,7 @@ static void usage(void)
fprintf(stderr, "Supported <test type>: anon, hugetlb, "
"hugetlb_shared, shmem\n\n");
fprintf(stderr, "Examples:\n\n");
- fprintf(stderr, examples);
+ fprintf(stderr, "%s", examples);
exit(1);
}
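
The one functional change in this hunk replaces fprintf(stderr, examples) with fprintf(stderr, "%s", examples): when variable text is passed as the format argument, any '%' in it is parsed as a conversion specification, which at best mangles the output and at worst reads arguments that were never passed. A tiny, hypothetical illustration of the difference:

/* Illustrative only; not part of the patch. */
#include <stdio.h>

int main(void)
{
	const char *msg = "fills memory to 100% of the area\n";

	/* Unsafe pattern: fprintf(stderr, msg); would treat "% o" in msg as a
	 * conversion and try to fetch an argument that does not exist. */

	/* Safe: msg is plain data and is never interpreted as a format string. */
	fprintf(stderr, "%s", msg);
	return 0;
}
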
diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c
index e7fe734c374f..83acdff26a13 100644
--- a/tools/testing/selftests/vm/va_128TBswitch.c
+++ b/tools/testing/selftests/vm/va_128TBswitch.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Authors: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
* Authors: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
-
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
*/
#include <stdio.h>
diff --git a/tools/testing/selftests/vm/virtual_address_range.c b/tools/testing/selftests/vm/virtual_address_range.c
index 1830d66a6f0e..c0592646ed93 100644
--- a/tools/testing/selftests/vm/virtual_address_range.c
+++ b/tools/testing/selftests/vm/virtual_address_range.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2017, Anshuman Khandual, IBM Corp.
- * Licensed under GPLv2.
*
* Works on architectures which support 128TB virtual
* address range and beyond.
diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh
index 172d3293fb7b..3e2089c8cf54 100755
--- a/tools/testing/selftests/x86/check_cc.sh
+++ b/tools/testing/selftests/x86/check_cc.sh
@@ -1,7 +1,7 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
# check_cc.sh - Helper to test userspace compilation support
# Copyright (c) 2015 Andrew Lutomirski
-# GPL v2
CC="$1"
TESTPROG="$2"
diff --git a/tools/testing/selftests/x86/check_initial_reg_state.c b/tools/testing/selftests/x86/check_initial_reg_state.c
index 6aaed9b85baf..3bc95f3ed585 100644
--- a/tools/testing/selftests/x86/check_initial_reg_state.c
+++ b/tools/testing/selftests/x86/check_initial_reg_state.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* check_initial_reg_state.c - check that execve sets the correct state
* Copyright (c) 2014-2016 Andrew Lutomirski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index ade443a88421..d1e919b0c1dc 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* entry_from_vm86.c - tests kernel entries from vm86 mode
* Copyright (c) 2014-2015 Andrew Lutomirski
*
* This exercises a few paths that need to special-case vm86 mode.
- *
- * GPL v2.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index f249e042b3b5..af85bd4752a5 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* fsgsbase.c, an fsgsbase test
* Copyright (c) 2014-2016 Andy Lutomirski
- * GPL v2
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/x86/mpx-dig.c b/tools/testing/selftests/x86/mpx-dig.c
index c13607ef5c11..880fbf676968 100644
--- a/tools/testing/selftests/x86/mpx-dig.c
+++ b/tools/testing/selftests/x86/mpx-dig.c
@@ -8,9 +8,7 @@
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
-#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index bf1bb15b6fbe..23ddd453f362 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* mpx-mini-test.c: routines to test Intel MPX (Memory Protection eXtentions)
*
@@ -5,10 +6,6 @@
* "Ren, Qiaowei" <qiaowei.ren@intel.com>
* "Wei, Gang" <gang.wei@intel.com>
* "Hansen, Dave" <dave.hansen@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2.
*/
/*
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 4d9dc3f2fd70..3e49a7873f3e 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* sigreturn.c - tests for x86 sigreturn(2) and exit-to-userspace
* Copyright (c) 2014-2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* This is a series of tests that exercises the sigreturn(2) syscall and
* the IRET / SYSRET paths in the kernel.
*
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index ddfdd635de16..50ce6c3dd904 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* single_step_syscall.c - single-steps various x86 syscalls
* Copyright (c) 2014-2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* This is a very simple series of tests that makes system calls with
* the TF flag set. This exercises some nasty kernel code in the
* SYSENTER case: SYSENTER does not clear TF, so SYSENTER with TF set
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
index 7db4fc9fa09f..4e25d38c8bbd 100644
--- a/tools/testing/selftests/x86/syscall_arg_fault.c
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* syscall_arg_fault.c - tests faults 32-bit fast syscall stack args
* Copyright (c) 2015 Andrew Lutomirski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#define _GNU_SOURCE
@@ -43,7 +35,7 @@ static sigjmp_buf jmpbuf;
static volatile sig_atomic_t n_errs;
-static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+static void sigsegv_or_sigbus(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
@@ -73,7 +65,13 @@ int main()
if (sigaltstack(&stack, NULL) != 0)
err(1, "sigaltstack");
- sethandler(SIGSEGV, sigsegv, SA_ONSTACK);
+ sethandler(SIGSEGV, sigsegv_or_sigbus, SA_ONSTACK);
+ /*
+ * The actual exception can vary. On Atom CPUs, we get #SS
+ * instead of #PF when the vDSO fails to access the stack when
+ * ESP is too close to 2^32, and #SS causes SIGBUS.
+ */
+ sethandler(SIGBUS, sigsegv_or_sigbus, SA_ONSTACK);
sethandler(SIGILL, sigill, SA_ONSTACK);
/*
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index 43fcab367fb0..02309a195041 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* syscall_nt.c - checks syscalls with NT set
* Copyright (c) 2014-2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* Some obscure user-space code requires the ability to make system calls
* with FLAGS.NT set. Make sure it works.
*/
diff --git a/tools/testing/selftests/x86/sysret_rip.c b/tools/testing/selftests/x86/sysret_rip.c
index d85ec5b3671c..84d74be1d902 100644
--- a/tools/testing/selftests/x86/sysret_rip.c
+++ b/tools/testing/selftests/x86/sysret_rip.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* sigreturn.c - tests that x86 avoids Intel SYSRET pitfalls
* Copyright (c) 2014-2016 Andrew Lutomirski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#define _GNU_SOURCE
diff --git a/tools/testing/selftests/x86/sysret_ss_attrs.c b/tools/testing/selftests/x86/sysret_ss_attrs.c
index ce42d5a64009..5f3d4fca440f 100644
--- a/tools/testing/selftests/x86/sysret_ss_attrs.c
+++ b/tools/testing/selftests/x86/sysret_ss_attrs.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* sysret_ss_attrs.c - test that syscalls return valid hidden SS attributes
* Copyright (c) 2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* On AMD CPUs, SYSRET can return with a valid SS descriptor with
* the hidden attributes set to an unusable state. Make sure the kernel
* doesn't let this happen.
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index 64f11c8d9b76..f0d876d48277 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* 32-bit test to check vDSO mremap.
*
* Copyright (c) 2016 Dmitry Safonov
* Suggested-by: Andrew Lutomirski
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
/*
* Can be built statically:
diff --git a/tools/testing/selftests/x86/test_syscall_vdso.c b/tools/testing/selftests/x86/test_syscall_vdso.c
index c9c3281077bc..8965c311bd65 100644
--- a/tools/testing/selftests/x86/test_syscall_vdso.c
+++ b/tools/testing/selftests/x86/test_syscall_vdso.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* 32-bit syscall ABI conformance test.
*
* Copyright (c) 2015 Denys Vlasenko
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
/*
* Can be built statically:
diff --git a/tools/testing/selftests/x86/thunks.S b/tools/testing/selftests/x86/thunks.S
index ce8a995bbb17..1bb5d62c16a4 100644
--- a/tools/testing/selftests/x86/thunks.S
+++ b/tools/testing/selftests/x86/thunks.S
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* thunks.S - assembly helpers for mixed-bitness code
* Copyright (c) 2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* These are little helpers that make it easier to switch bitness on
* the fly.
*/
diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
index 29b644bb9f2f..a71d92da8f46 100644
--- a/tools/testing/selftests/x86/thunks_32.S
+++ b/tools/testing/selftests/x86/thunks_32.S
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* thunks_32.S - assembly helpers for mixed-bitness code
* Copyright (c) 2015 Denys Vlasenko
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* These are little helpers that make it easier to switch bitness on
* the fly.
*/
diff --git a/tools/testing/selftests/x86/trivial_32bit_program.c b/tools/testing/selftests/x86/trivial_32bit_program.c
index fabdf0f51621..aa1f58c2f71c 100644
--- a/tools/testing/selftests/x86/trivial_32bit_program.c
+++ b/tools/testing/selftests/x86/trivial_32bit_program.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Trivial program to check that we have a valid 32-bit build environment.
* Copyright (c) 2015 Andy Lutomirski
- * GPL v2
*/
#ifndef __i386__
diff --git a/tools/testing/selftests/x86/trivial_64bit_program.c b/tools/testing/selftests/x86/trivial_64bit_program.c
index 05c6a41b3671..39f4b84fbf15 100644
--- a/tools/testing/selftests/x86/trivial_64bit_program.c
+++ b/tools/testing/selftests/x86/trivial_64bit_program.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Trivial program to check that we have a valid 64-bit build environment.
* Copyright (c) 2015 Andy Lutomirski
- * GPL v2
*/
#ifndef __x86_64__
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 97311333700e..0075ccd65407 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* unwind_vdso.c - tests unwind info for AT_SYSINFO in the vDSO
* Copyright (c) 2014-2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* This tests __kernel_vsyscall's unwind info.
*/
diff --git a/tools/testing/selftests/x86/vdso_restorer.c b/tools/testing/selftests/x86/vdso_restorer.c
index cb038424a403..29a5c94c4b50 100644
--- a/tools/testing/selftests/x86/vdso_restorer.c
+++ b/tools/testing/selftests/x86/vdso_restorer.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* vdso_restorer.c - tests vDSO-based signal restore
* Copyright (c) 2015 Andrew Lutomirski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* This makes sure that sa_restorer == NULL keeps working on 32-bit
* configurations. Modern glibc doesn't use it under any circumstances,
* so it's easy to overlook breakage.
diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
index b9566a6478a9..114863d9fb87 100755
--- a/tools/testing/selftests/zram/zram01.sh
+++ b/tools/testing/selftests/zram/zram01.sh
@@ -1,16 +1,7 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
# Test creates several zram devices with different filesystems on them.
# It fills each device with zeros and checks that compression works.
#
diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
index 74569b883737..e83b404807c0 100755
--- a/tools/testing/selftests/zram/zram02.sh
+++ b/tools/testing/selftests/zram/zram02.sh
@@ -1,16 +1,7 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
# Test checks that we can create swap zram device.
#
# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index 9e73a4fb9b0a..6f872f266fd1 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -1,16 +1,7 @@
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (c) 2015 Oracle and/or its affiliates. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
# Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile
index 66ba0924194d..5be687b1e16c 100644
--- a/tools/testing/vsock/Makefile
+++ b/tools/testing/vsock/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
all: test
test: vsock_diag_test
vsock_diag_test: vsock_diag_test.o timeout.o control.o
diff --git a/tools/testing/vsock/control.c b/tools/testing/vsock/control.c
index 90fd47f0e422..45f328c6ff23 100644
--- a/tools/testing/vsock/control.c
+++ b/tools/testing/vsock/control.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Control socket for client/server test execution
*
* Copyright (C) 2017 Red Hat, Inc.
*
* Author: Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
/* The client and server may need to coordinate to avoid race conditions like
diff --git a/tools/testing/vsock/timeout.c b/tools/testing/vsock/timeout.c
index c49b3003b2db..44aee49b6cee 100644
--- a/tools/testing/vsock/timeout.c
+++ b/tools/testing/vsock/timeout.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Timeout API for single-threaded programs that use blocking
* syscalls (read/write/send/recv/connect/accept).
*
* Copyright (C) 2017 Red Hat, Inc.
*
* Author: Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
/* Use the following pattern:
diff --git a/tools/testing/vsock/vsock_diag_test.c b/tools/testing/vsock/vsock_diag_test.c
index e896a4af52f4..c481101364a4 100644
--- a/tools/testing/vsock/vsock_diag_test.c
+++ b/tools/testing/vsock/vsock_diag_test.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* vsock_diag_test - vsock_diag.ko test suite
*
* Copyright (C) 2017 Red Hat, Inc.
*
* Author: Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#include <getopt.h>
diff --git a/tools/thermal/tmon/pid.c b/tools/thermal/tmon/pid.c
index fd7e9e9d6f4a..c54edb4f630c 100644
--- a/tools/thermal/tmon/pid.c
+++ b/tools/thermal/tmon/pid.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pid.c PID controller for testing cooling devices
*
- *
- *
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 or later as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author Name Jacob Pan <jacob.jun.pan@linux.intel.com>
- *
*/
#include <unistd.h>
diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
index 18f523557983..b00b1bfd9d8e 100644
--- a/tools/thermal/tmon/sysfs.c
+++ b/tools/thermal/tmon/sysfs.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sysfs.c sysfs ABI access functions for TMON program
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 or later as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
- *
*/
#include <unistd.h>
#include <stdio.h>
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index b43138f8b862..83ec6e482f12 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tmon.c Thermal Monitor (TMON) main function and entry point
*
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 or later as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
- *
*/
#include <getopt.h>
diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h
index 9e3c49c547ac..c9066ec104dd 100644
--- a/tools/thermal/tmon/tmon.h
+++ b/tools/thermal/tmon/tmon.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* tmon.h contains data structures and constants used by TMON
*
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 or later as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author Name Jacob Pan <jacob.jun.pan@linux.intel.com>
- *
*/
#ifndef TMON_H
diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c
index b5d1c6b22dd3..031b258667d8 100644
--- a/tools/thermal/tmon/tui.c
+++ b/tools/thermal/tmon/tui.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tui.c ncurses text user interface for TMON program
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 or later as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Jacob Pan <jacob.jun.pan@linux.intel.com>
- *
*/
#include <unistd.h>
diff --git a/tools/time/udelay_test.sh b/tools/time/udelay_test.sh
index 12d46b926917..6779d7e55d85 100755
--- a/tools/time/udelay_test.sh
+++ b/tools/time/udelay_test.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
# udelay() test script
#
@@ -8,14 +9,6 @@
#
# Copyright (C) 2014 Google, Inc.
#
-# This software is licensed under the terms of the GNU General Public
-# License version 2, as published by the Free Software Foundation, and
-# may be copied, distributed, and modified under those terms.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
MODULE_NAME=udelay_test
UDELAY_PATH=/sys/kernel/debug/udelay_test
diff --git a/tools/usb/ffs-aio-example/simple/host_app/Makefile b/tools/usb/ffs-aio-example/simple/host_app/Makefile
index 8c4a6f0aa82d..c3523837c936 100644
--- a/tools/usb/ffs-aio-example/simple/host_app/Makefile
+++ b/tools/usb/ffs-aio-example/simple/host_app/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
CC = gcc
LIBUSB_CFLAGS = $(shell pkg-config --cflags libusb-1.0)
LIBUSB_LIBS = $(shell pkg-config --libs libusb-1.0)
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 0f395dfb7774..22b938fbdfb7 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ffs-test.c -- user mode filesystem api for usb composite function
*
* Copyright (C) 2010 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* $(CROSS_COMPILE)cc -Wall -Wextra -g -o ffs-test ffs-test.c -lpthread */
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index 2d89b5f686b1..ee8208b2f946 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* $(CROSS_COMPILE)cc -Wall -Wextra -g -lpthread -o testusb testusb.c */
/*
* Copyright (c) 2002 by David Brownell
* Copyright (c) 2010 by Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/tools/usb/usbip/libsrc/names.c b/tools/usb/usbip/libsrc/names.c
index 81ff8522405c..aba7f4188044 100644
--- a/tools/usb/usbip/libsrc/names.c
+++ b/tools/usb/usbip/libsrc/names.c
@@ -1,29 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* names.c -- USB name database manipulation routines
*
* Copyright (C) 1999, 2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- *
- *
- *
* Copyright (C) 2005 Takahiro Hirofuchi
* - names_deinit() is added.
- *
*/
#include <sys/types.h>
diff --git a/tools/usb/usbip/libsrc/names.h b/tools/usb/usbip/libsrc/names.h
index 680926512de2..b39958230e70 100644
--- a/tools/usb/usbip/libsrc/names.h
+++ b/tools/usb/usbip/libsrc/names.h
@@ -1,24 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* names.h -- USB name database manipulation routines
*
* Copyright (C) 1999, 2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- *
* Copyright (C) 2005 Takahiro Hirofuchi
* - names_free() is added.
*/
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.c b/tools/usb/usbip/libsrc/usbip_device_driver.c
index ec3a0b794f15..5a3726eb44ab 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* 2015 Samsung Electronics
@@ -6,19 +7,6 @@
* Based on tools/usb/usbip/libsrc/usbip_host_driver.c, which is:
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <fcntl.h>
diff --git a/tools/usb/usbip/libsrc/usbip_device_driver.h b/tools/usb/usbip/libsrc/usbip_device_driver.h
index 54cb658b37a3..1ce0bbd75f34 100644
--- a/tools/usb/usbip/libsrc/usbip_device_driver.h
+++ b/tools/usb/usbip/libsrc/usbip_device_driver.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* 2015 Samsung Electronics
@@ -6,19 +7,6 @@
* Based on tools/usb/usbip/libsrc/usbip_host_driver.c, which is:
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_DEVICE_DRIVER_H
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index d79c7581b175..2813aa821c82 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
@@ -6,19 +7,6 @@
* Refactored from usbip_host_driver.c, which is:
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.h b/tools/usb/usbip/libsrc/usbip_host_common.h
index a64b8033fe64..f46967c0aa18 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.h
+++ b/tools/usb/usbip/libsrc/usbip_host_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
@@ -6,19 +7,6 @@
* Refactored from usbip_host_driver.c, which is:
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_HOST_COMMON_H
diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
index 4de6edc54d35..573e73ec36bd 100644
--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
+++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <unistd.h>
diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.h b/tools/usb/usbip/libsrc/usbip_host_driver.h
index 77f07e72a7fe..6ba996c5a709 100644
--- a/tools/usb/usbip/libsrc/usbip_host_driver.h
+++ b/tools/usb/usbip/libsrc/usbip_host_driver.h
@@ -1,22 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_HOST_DRIVER_H
diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
index 73d8eee8130b..f7c7220d9766 100644
--- a/tools/usb/usbip/src/usbip.c
+++ b/tools/usb/usbip/src/usbip.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* command structure borrowed from udev
* (git://git.kernel.org/pub/scm/linux/hotplug/udev.git)
*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
diff --git a/tools/usb/usbip/src/usbip.h b/tools/usb/usbip/src/usbip.h
index 84fe66a9d8ad..e31779290601 100644
--- a/tools/usb/usbip/src/usbip.h
+++ b/tools/usb/usbip/src/usbip.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_H
diff --git a/tools/usb/usbip/src/usbip_attach.c b/tools/usb/usbip/src/usbip_attach.c
index ba88728483ff..b4aeb9f1f493 100644
--- a/tools/usb/usbip/src/usbip_attach.c
+++ b/tools/usb/usbip/src/usbip_attach.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/stat.h>
diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
index e121cfb1746a..f1cf9225a69c 100644
--- a/tools/usb/usbip/src/usbip_bind.c
+++ b/tools/usb/usbip/src/usbip_bind.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <libudev.h>
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index 777f7286a0c5..aec993159036 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ctype.h>
diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
index 8d4ccf4b9480..8625b0f514ee 100644
--- a/tools/usb/usbip/src/usbip_list.c
+++ b/tools/usb/usbip/src/usbip_list.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/types.h>
diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c
index 8ffcd47d9638..d595d72693fb 100644
--- a/tools/usb/usbip/src/usbip_network.c
+++ b/tools/usb/usbip/src/usbip_network.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <sys/socket.h>
diff --git a/tools/usb/usbip/src/usbip_port.c b/tools/usb/usbip/src/usbip_port.c
index 7bd74fb3a9cd..4d14387df13d 100644
--- a/tools/usb/usbip/src/usbip_port.c
+++ b/tools/usb/usbip/src/usbip_port.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "vhci_driver.h"
diff --git a/tools/usb/usbip/src/usbip_unbind.c b/tools/usb/usbip/src/usbip_unbind.c
index a4a496c9cbaf..66a44d4a0d56 100644
--- a/tools/usb/usbip/src/usbip_unbind.c
+++ b/tools/usb/usbip/src/usbip_unbind.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <libudev.h>
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index 32864c52942d..48398a78e88a 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef HAVE_CONFIG_H
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 3d7b42e77299..76a2e1247f33 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <errno.h>
diff --git a/tools/usb/usbip/src/utils.h b/tools/usb/usbip/src/utils.h
index 5916fd3e02a6..4fc13854f7b9 100644
--- a/tools/usb/usbip/src/utils.h
+++ b/tools/usb/usbip/src/utils.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
* 2005-2007 Takahiro Hirofuchi
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __UTILS_H
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 7ef45a4a3cba..6683b4a70b05 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -127,7 +127,7 @@ static inline void free_page(unsigned long addr)
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
+#define WARN_ON_ONCE(cond) (unlikely(cond) ? fprintf (stderr, "WARNING\n") : 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \
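
For context on the kernel.h hunk above: in these userspace tools headers, unlikely() is assumed here to be the usual __builtin_expect() hint, so wrapping the WARN_ON_ONCE() condition in it marks the warning branch as cold; note that, unlike the in-kernel WARN_ON_ONCE(), this shim prints on every hit rather than latching a once-only flag. A minimal standalone sketch under those assumptions (illustrative, not part of the patch):

/*
 * Sketch only: unlikely() assumed to map to __builtin_expect(), and the
 * WARN_ON_ONCE() shim matches the form shown in the hunk above.
 */
#include <stdio.h>

#define unlikely(x)		__builtin_expect(!!(x), 0)
#define WARN_ON_ONCE(cond)	(unlikely(cond) ? fprintf(stderr, "WARNING\n") : 0)

int main(void)
{
	int ring_broken = 1;		/* hypothetical condition */

	WARN_ON_ONCE(ring_broken);	/* condition is true: prints WARNING */
	return 0;
}
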
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
index 453ca3c21193..5a18b2301a63 100644
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
- * This work is licensed under the terms of the GNU GPL, version 2.
*
* Command line processing and common functions for ring benchmarking.
*/
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 301d59bfcd0a..6d1fccd3d86c 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
- * This work is licensed under the terms of the GNU GPL, version 2.
*
* Common macros and functions for ring benchmarking.
*/
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 2d566fbd236b..c9b26335f891 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -18,7 +18,6 @@
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
#define SIZE_MAX (~(size_t)0)
#define KMALLOC_MAX_SIZE SIZE_MAX
-#define BUG_ON(x) assert(x)
typedef pthread_spinlock_t spinlock_t;
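
An aside on the local BUG_ON() shim deleted above: kernel BUG_ON(cond) fires when the condition is true, whereas assert(expr) aborts when the expression is false, so an assert-based replacement has to negate the condition. A hypothetical sketch of such a shim (not part of this patch):

/*
 * Hypothetical userspace shim: the negation keeps kernel BUG_ON() semantics,
 * where a TRUE condition is the fatal case.
 */
#include <assert.h>

#define BUG_ON(cond)	assert(!(cond))

int main(void)
{
	int free_slots = 3;		/* example state */

	BUG_ON(free_slots < 0);		/* passes: the bug condition is false */
	return 0;
}
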
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index 5a41404aaef5..58e7d33bddfc 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
- * This work is licensed under the terms of the GNU GPL, version 2.
*
* Simple descriptor-based ring. virtio 0.9 compatible event index is used for
* signalling, unconditionally.
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index 5fd3fbcb9e57..13a035a390e9 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc.
* Author: Michael S. Tsirkin <mst@redhat.com>
- * This work is licensed under the terms of the GNU GPL, version 2.
*
* Partial implementation of virtio 0.9. event index is used for signalling,
* unconditionally. Design roughly follows linux kernel implementation in order
diff --git a/tools/virtio/vhost_test/Makefile b/tools/virtio/vhost_test/Makefile
index a1d35b81b314..94d3aff987dc 100644
--- a/tools/virtio/vhost_test/Makefile
+++ b/tools/virtio/vhost_test/Makefile
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-m += vhost_test.o
EXTRA_CFLAGS += -Idrivers/vhost
diff --git a/tools/virtio/virtio-trace/trace-agent-ctl.c b/tools/virtio/virtio-trace/trace-agent-ctl.c
index a2d0403c4f94..73d253d4b559 100644
--- a/tools/virtio/virtio-trace/trace-agent-ctl.c
+++ b/tools/virtio/virtio-trace/trace-agent-ctl.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Controller of read/write threads for virtio-trace
*
* Copyright (C) 2012 Hitachi, Ltd.
* Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
* Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
- *
- * Licensed under GPL version 2 only.
- *
*/
#define _GNU_SOURCE
diff --git a/tools/virtio/virtio-trace/trace-agent-rw.c b/tools/virtio/virtio-trace/trace-agent-rw.c
index 3aace5ea4842..ddfe7875eb16 100644
--- a/tools/virtio/virtio-trace/trace-agent-rw.c
+++ b/tools/virtio/virtio-trace/trace-agent-rw.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Read/write thread of a guest agent for virtio-trace
*
* Copyright (C) 2012 Hitachi, Ltd.
* Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
* Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
- *
- * Licensed under GPL version 2 only.
- *
*/
#define _GNU_SOURCE
diff --git a/tools/virtio/virtio-trace/trace-agent.c b/tools/virtio/virtio-trace/trace-agent.c
index 0a0a7dd4eff7..cdfe77c2b4c8 100644
--- a/tools/virtio/virtio-trace/trace-agent.c
+++ b/tools/virtio/virtio-trace/trace-agent.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Guest agent for virtio-trace
*
* Copyright (C) 2012 Hitachi, Ltd.
* Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
* Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
- *
- * Licensed under GPL version 2 only.
- *
*/
#define _GNU_SOURCE
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 1ff3a6c0367b..58c0eab71bca 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* page-types: Tool for querying page flags
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should find a copy of v2 of the GNU General Public License somewhere on
- * your Linux system; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
* Copyright (C) 2009 Intel corporation
*
* Authors: Wu Fengguang <fengguang.wu@intel.com>
@@ -133,7 +121,7 @@ static const char * const page_flag_names[] = {
[KPF_NOPAGE] = "n:nopage",
[KPF_KSM] = "x:ksm",
[KPF_THP] = "t:thp",
- [KPF_BALLOON] = "o:balloon",
+ [KPF_OFFLINE] = "o:offline",
[KPF_PGTABLE] = "g:pgtable",
[KPF_ZERO_PAGE] = "z:zero_page",
[KPF_IDLE] = "i:idle_page",
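
For context on the page_flag_names[] entry renamed above: page-types decodes the per-PFN 64-bit words exposed by /proc/kpageflags and emits the single-letter code in front of each "letter:name" entry for every set bit. A self-contained sketch of that pattern, using placeholder bit numbers rather than the real KPF_* values (illustrative only, not the tool's code):

/*
 * Sketch: read one flag word from /proc/kpageflags (root only) and print the
 * letter before ':' for each set bit.  Bit numbers below are placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const char * const demo_names[] = {
	[5]  = "l:lru",		/* placeholder bit assignments */
	[22] = "t:thp",
};

int main(void)
{
	uint64_t pfn = 0x1000, flags = 0;	/* hypothetical page frame number */
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0 || pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags))
		return 1;

	for (unsigned int i = 0; i < sizeof(demo_names) / sizeof(demo_names[0]); i++)
		if (demo_names[i] && (flags & (1ULL << i)))
			putchar(demo_names[i][0]);	/* the letter before ':' */
	putchar('\n');
	close(fd);
	return 0;
}
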
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 0cf28aa6f21c..26e193ffd2a2 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,16 +1,9 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
# Sergey Senozhatsky, 2015
# sergey.senozhatsky.work@gmail.com
#
-# This software is licensed under the terms of the GNU General Public
-# License version 2, as published by the Free Software Foundation, and
-# may be copied, distributed, and modified under those terms.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
# This program is intended to plot a `slabinfo -X' stats, collected,
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 334b16db0ebb..73818f1b2ef8 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -110,39 +110,42 @@ static void fatal(const char *x, ...)
static void usage(void)
{
printf("slabinfo 4/15/2011. (c) 2007 sgi/(c) 2011 Linux Foundation.\n\n"
- "slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
+ "slabinfo [-aADefhilnosrStTvz1LXBU] [N=K] [-dafzput] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
"-A|--activity Most active slabs first\n"
- "-d<options>|--debug=<options> Set/Clear Debug options\n"
+ "-B|--Bytes Show size in bytes\n"
"-D|--display-active Switch line format to activity\n"
"-e|--empty Show empty slabs\n"
"-f|--first-alias Show first alias\n"
"-h|--help Show usage information\n"
"-i|--inverted Inverted list\n"
"-l|--slabs Show slabs\n"
+ "-L|--Loss Sort by loss\n"
"-n|--numa Show NUMA information\n"
- "-o|--ops Show kmem_cache_ops\n"
+ "-N|--lines=K Show the first K slabs\n"
+ "-o|--ops Show kmem_cache_ops\n"
+ "-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
- "-r|--report Detailed report on single slabs\n"
"-S|--Size Sort by size\n"
"-t|--tracking Show alloc/free information\n"
"-T|--Totals Show summary information\n"
+ "-U|--Unreclaim Show unreclaimable slabs only\n"
"-v|--validate Validate slabs\n"
"-z|--zero Include empty slabs\n"
"-1|--1ref Single reference\n"
- "-N|--lines=K Show the first K slabs\n"
- "-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
- "-B|--Bytes Show size in bytes\n"
- "-U|--Unreclaim Show unreclaimable slabs only\n"
- "\nValid debug options (FZPUT may be combined)\n"
- "a / A Switch on all debug options (=FZUP)\n"
- "- Switch off all debug options\n"
- "f / F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
- "z / Z Redzoning\n"
- "p / P Poisoning\n"
- "u / U Tracking\n"
- "t / T Tracing\n"
+
+ "\n"
+ "-d | --debug Switch off all debug options\n"
+ "-da | --debug=a Switch on all debug options (--debug=FZPU)\n"
+
+ "\n"
+ "-d[afzput] | --debug=[afzput]\n"
+ " f | F Sanity Checks (SLAB_CONSISTENCY_CHECKS)\n"
+ " z | Z Redzoning\n"
+ " p | P Poisoning\n"
+ " u | U Tracking\n"
+ " t | T Tracing\n"
);
}
diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile
index e0e87239126b..e161ff59ec46 100644
--- a/tools/wmi/Makefile
+++ b/tools/wmi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
PREFIX ?= /usr
SBINDIR ?= sbin
INSTALL ?= install
diff --git a/tools/wmi/dell-smbios-example.c b/tools/wmi/dell-smbios-example.c
index 9d3bde081249..1f3e7ab14b68 100644
--- a/tools/wmi/dell-smbios-example.c
+++ b/tools/wmi/dell-smbios-example.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Sample application for SMBIOS communication over WMI interface
* Performs the following:
@@ -6,10 +7,6 @@
* - Simple activation of a token
*
* Copyright (C) 2017 Dell, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <errno.h>